index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
33,459
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/convert/bigbird.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import math
import numpy as np
import re
import tensorflow as tf
import torch
from os.path import abspath
from argparse import ArgumentParser
from transformers.utils import logging
from ..config.big_bird import PreTrained
from ...models.big_bird import ForPreTraining, ForQA
logging.set_verbosity_info()
log = logging.get_logger(__name__)
# TF checkpoint variable-name fragments that have no PyTorch counterpart
# (Adam optimizer slots and the global step counter); any variable whose
# path contains one of these is skipped by load_src_weights.
_SKIP = [
    "adam_v",
    "adam_m",
    "AdamWeightDecayOptimizer",
    "AdamWeightDecayOptimizer_1",
    "global_step",
]
def load_src_weights(model, src_path, is_trivia=False):
    """Copy TF BigBird checkpoint weights at `src_path` into the PyTorch `model`.

    Walks every TF variable name, maps its slash-separated scope segments onto
    the corresponding PyTorch submodule attributes, transposes/reshapes kernels
    where the layouts differ, and assigns each tensor in place.

    model: target PyTorch module (ForPreTraining or ForQA).
    src_path: path to a TF checkpoint, or a SavedModel dir when `is_trivia`.
    is_trivia: True for the TriviaQA SavedModel layout, which needs the
        `_load_trivia` name remapping instead of `_load_weights`.
    Returns the mutated `model`.
    """
    src_path = abspath(src_path)
    log.info(f"Loading from: {src_path}")
    # SavedModels expose variables directly; plain checkpoints are listed by name.
    xs = tf.saved_model.load(src_path).variables if is_trivia else tf.train.list_variables(src_path)
    assert len(xs) > 0
    pt_names = list(model.state_dict().keys())
    if is_trivia:
        ns, ws = _load_trivia(xs)
    else:
        ns, ws = _load_weights(xs, src_path)
    for n in ns:
        xs = n.split("/")  # NOTE: reuses `xs` for this name's scope segments
        # Optimizer state and step counters have no PyTorch counterpart.
        if any(x in _SKIP for x in xs):
            log.info(f"Skipping {'/'.join(xs)}")
            continue
        ts = []  # trace of attribute names walked, for logging/bookkeeping
        p = model  # pointer walked down the module tree
        for x in xs:
            # "layer_3"-style segments split into name + numeric index.
            if re.fullmatch(r"[A-Za-z]+_\d+", x):
                scopes = re.split(r"_(\d+)", x)
            else:
                scopes = [x]
            if scopes[0] == "kernel" or scopes[0] == "gamma":
                p = getattr(p, "weight")
                ts.append("weight")
            elif scopes[0] == "output_bias" or scopes[0] == "beta":
                p = getattr(p, "bias")
                ts.append("bias")
            elif scopes[0] == "output_weights":
                p = getattr(p, "weight")
                ts.append("weight")
            elif scopes[0] == "squad":
                p = getattr(p, "classifier")
                ts.append("classifier")
            elif scopes[0] == "transform":
                p = getattr(p, "transform")
                ts.append("transform")
                # TF nests dense/LayerNorm under "transform"; pick the child
                # by which sibling segment names appear in the full path.
                if ("bias" in xs) or ("kernel" in xs):
                    p = getattr(p, "dense")
                    ts.append("dense")
                elif ("beta" in xs) or ("gamma" in xs):
                    p = getattr(p, "LayerNorm")
                    ts.append("LayerNorm")
            else:
                try:
                    p = getattr(p, scopes[0])
                    ts.append(f"{scopes[0]}")
                except AttributeError:
                    # NOTE(review): this `continue` advances the INNER scope
                    # loop only, so later segments of `n` are still processed
                    # against the unmoved pointer — mirrors upstream convert
                    # scripts, but worth confirming intent.
                    log.info(f"Skipping {x}")
                    continue
            if len(scopes) >= 2:
                # Numeric suffix indexes into a module list (e.g. encoder layers).
                i = int(scopes[1])
                p = p[i]
                ts.append(f"{i}")
        w = ws[n]
        # `x` below is the final scope segment left over from the loop above.
        if x[-11:] == "_embeddings" or x == "embeddings":
            p = getattr(p, "weight")
            ts.append("weight")
        elif x == "kernel":
            # TF kernels are (in, out); PyTorch Linear weights are (out, in).
            w = np.transpose(w)
        if len(w.shape) > len(p.shape) and math.prod(w.shape) == math.prod(p.shape):
            # Per-head TF layouts collapse into the fused 2-D PyTorch layout.
            if (
                n.endswith("attention/self/key/kernel")
                or n.endswith("attention/self/query/kernel")
                or n.endswith("attention/self/value/kernel")
            ):
                w = w.transpose(1, 0, 2).reshape(p.shape)
            elif n.endswith("attention/output/dense/kernel"):
                w = w.transpose(0, 2, 1).reshape(p.shape)
            else:
                w = w.reshape(p.shape)
        assert p.shape == w.shape
        t = ".".join(ts)
        log.info(f"Initialize {t} from {n}")
        p.data = torch.from_numpy(w)
        # Track what was consumed so leftovers can be reported below.
        ws.pop(n, None)
        pt_names.remove(t)
    log.info(f"Not copied: {', '.join(ws.keys())}.")
    log.info(f"Not initialized: {', '.join(pt_names)}.")
    return model
def _load_weights(xs, src_path):
    """Materialize plain-checkpoint variables.

    Returns (names, name -> numpy array), with the encoder LayerNorm path
    rewritten to its embeddings-scoped equivalent.
    """
    names = []
    arrays = {}
    for name, shape in xs:
        name = name.replace("bert/encoder/LayerNorm", "bert/embeddings/LayerNorm")
        log.info(f"Loading TF weight {name} with shape {shape}")
        names.append(name)
        arrays[name] = tf.train.load_variable(src_path, name)
    return names, arrays
# Per-segment rename table used by _load_trivia: TriviaQA SavedModel scope
# names -> the BERT-style checkpoint names that load_src_weights understands.
# Segments absent from this table pass through unchanged.
_MAP = {
    "big_bird_attention": "attention/self",
    "output_layer_norm": "output/LayerNorm",
    "attention_output": "attention/output/dense",
    "output": "output/dense",
    "self_attention_layer_norm": "attention/output/LayerNorm",
    "intermediate": "intermediate/dense",
    "tok_embed": "bert/embeddings/tok_embed",
    "pos_embed": "bert/embeddings/pos_embed",
    "type_embeddings": "bert/embeddings/token_type_embeddings",
    "embeddings": "bert/embeddings",
    "layer_normalization": "output/LayerNorm",
    "layer_norm": "LayerNorm",
    "trivia_qa_head": "qa_classifier",
    "dense": "intermediate/dense",
    "dense_1": "qa_outputs",
}
def _load_trivia(xs):
    """Remap TriviaQA SavedModel variables to checkpoint-style names.

    Returns (names, name -> numpy array). The trailing ":0" of each variable
    name is stripped, and the final two variables (the QA head) have their
    "intermediate" segment rewritten to "output".
    """
    names = []
    arrays = {}
    qa_head_start = len(xs) - 2
    for i, var in enumerate(xs):
        parts = var.name.split("/")
        if "transformer_scaffold" in parts[0]:
            pieces = parts[0].split("_")
            if len(pieces) < 3:
                pieces += [0]
            parts[0] = f"bert/encoder/layer_{pieces[2]}"
        # Rename each segment via _MAP, then drop the trailing ":0".
        n = "/".join([_MAP.get(part, part) for part in parts])[:-2]
        if "self/attention/output" in n:
            n = n.replace("self/attention/output", "output")
        if i >= qa_head_start:
            n = n.replace("intermediate", "output")
        log.info(f"Loading TF weight {n} with shape {var.shape}")
        names.append(n)
        arrays[n] = var.value().numpy()
    return names, arrays
def to_pytorch(src_path, cfg_path, save_path, is_trivia):
    """Build a model from `cfg_path`, load TF weights, and save as PyTorch.

    src_path: TF checkpoint (or SavedModel when `is_trivia`).
    cfg_path: JSON config consumed by PreTrained.from_json_file.
    save_path: destination directory for save_pretrained.
    is_trivia: build ForQA (TriviaQA head) instead of ForPreTraining.
    """
    cfg = PreTrained.from_json_file(cfg_path)
    print(f"Building from config: {cfg}")
    m = ForQA(cfg) if is_trivia else ForPreTraining(cfg)
    load_src_weights(m, src_path, is_trivia=is_trivia)
    print(f"Saving to: {save_path}")
    m.save_pretrained(save_path)
if __name__ == "__main__":
    # CLI entry point: convert a TF BigBird checkpoint to PyTorch.
    parser = ArgumentParser()
    for flag in ("--src_path", "--cfg_path", "--save_path"):
        parser.add_argument(flag, default=None, type=str, required=True)
    parser.add_argument("--is_trivia", action="store_true")
    args = parser.parse_args()
    to_pytorch(args.src_path, args.cfg_path, args.save_path, args.is_trivia)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,460
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/models/gpt_neo.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import functional as F
from transformers.utils import logging
from .. import core as qc
from ..core import utils as qu
from ..core import output as qo
from ..core import forward as qf
from ..core import attention as qa
from ..core.embed import Embed
from ..core.mlp import Classifier, MLP, Predictor, Pool
from ..prep.config.gpt_neo import PreTrained
log = logging.get_logger(__name__)
# Reference pretrained checkpoint identifiers for this architecture.
LIST = [
    "EleutherAI/gpt-neo-1.3B",
]
class SelfAttention(qc.Module):
    """Causal self-attention for GPT-Neo layers.

    `attention_type` selects the mask: "global" is plain causal attention;
    "local" additionally restricts each query to the trailing `config.s_win`
    positions.
    """

    def __init__(self, config, attention_type):
        super().__init__()
        max_positions = config.n_pos
        # Boolean lower-triangular causal mask, shaped for broadcast over
        # (batch, head) dimensions.
        bias = torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
            1, 1, max_positions, max_positions
        )
        if attention_type == "local":
            # XOR with a triangle shifted down by s_win clears positions
            # farther back than the window, leaving a banded mask.
            bias = torch.bitwise_xor(bias, torch.tril(bias, -config.s_win))
        self.register_buffer("bias", bias)
        # NOTE(review): registered but never read within this class —
        # presumably kept for state-dict compatibility; confirm before removing.
        self.register_buffer("masked_bias", torch.tensor(-1e9))
        self.attn_dropout = qc.Dropout(config.drop_attn)
        self.drop_resid = qc.Dropout(config.drop_resid)
        self.embed_dim = config.d_model
        self.n_heads = config.n_heads
        self.head_dim = self.embed_dim // self.n_heads
        if self.head_dim * self.n_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by n_heads (got `embed_dim`: {self.embed_dim} and `n_heads`: {self.n_heads})."
            )
        # GPT-Neo projections: no bias on q/k/v, bias on the output projection.
        self.k_proj = qc.Linear(self.embed_dim, self.embed_dim, bias=False)
        self.v_proj = qc.Linear(self.embed_dim, self.embed_dim, bias=False)
        self.q_proj = qc.Linear(self.embed_dim, self.embed_dim, bias=False)
        self.out_proj = qc.Linear(self.embed_dim, self.embed_dim, bias=True)

    def _split_heads(self, tensor, n_heads, attn_head_size):
        # (batch, seq, embed) -> (batch, heads, seq, head_dim)
        new_shape = tensor.size()[:-1] + (n_heads, attn_head_size)
        tensor = tensor.view(new_shape)
        return tensor.permute(0, 2, 1, 3)

    def _merge_heads(self, tensor, n_heads, attn_head_size):
        # Inverse of _split_heads: (batch, heads, seq, head_dim) -> (batch, seq, embed)
        tensor = tensor.permute(0, 2, 1, 3).contiguous()
        new_shape = tensor.size()[:-2] + (n_heads * attn_head_size,)
        return tensor.view(new_shape)

    def _attn(self, query, key, value, attention_mask=None, head_mask=None):
        # Scores are computed in fp32, then the probabilities are cast back
        # to the value dtype after softmax.
        query = query.to(torch.float32)
        key = key.to(torch.float32)
        attn_weights = torch.matmul(query, key.transpose(-1, -2))
        query_length, key_length = query.size(-2), key.size(-2)
        # Slice the precomputed mask to the current (query, key) extent; the
        # row offset accounts for cached keys extending before the queries.
        causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
        # Masked positions get the dtype's minimum so softmax sends them to ~0.
        mask_value = torch.finfo(attn_weights.dtype).min
        mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
        attn_weights = torch.where(causal_mask, attn_weights, mask_value)
        if attention_mask is not None:
            attn_weights = attn_weights + attention_mask
        attn_weights = F.softmax(attn_weights, dim=-1)
        attn_weights = attn_weights.to(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)
        if head_mask is not None:
            attn_weights = attn_weights * head_mask
        attn_output = torch.matmul(attn_weights, value)
        return attn_output, attn_weights

    def forward(
        self,
        hiddens,
        attention_mask=None,
        layer_past=None,
        head_mask=None,
        y_cache=False,
        output_attentions=False,
    ):
        """Returns (attn_output, present[, attn_weights]).

        `present` is the (key, value) pair for caching when `y_cache`,
        else None; attention weights are appended when `output_attentions`.
        """
        query = self.q_proj(hiddens)
        key = self.k_proj(hiddens)
        value = self.v_proj(hiddens)
        query = self._split_heads(query, self.n_heads, self.head_dim)
        key = self._split_heads(key, self.n_heads, self.head_dim)
        value = self._split_heads(value, self.n_heads, self.head_dim)
        if layer_past is not None:
            # Prepend cached keys/values along the sequence axis.
            past_key = layer_past[0]
            past_value = layer_past[1]
            key = torch.cat((past_key, key), dim=-2)
            value = torch.cat((past_value, value), dim=-2)
        if y_cache is True:
            present = (key, value)
        else:
            present = None
        attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
        attn_output = self._merge_heads(attn_output, self.n_heads, self.head_dim)
        attn_output = self.out_proj(attn_output)
        attn_output = self.drop_resid(attn_output)
        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs
class Attention(qc.Module):
    """Per-layer attention dispatcher.

    Reads `config.attention_layers[layer_id]` and builds the matching
    SelfAttention variant ("global" or "local"); forward delegates wholesale.
    """

    def __init__(self, config, layer_id=0):
        super().__init__()
        self.layer_id = layer_id
        self.attention_layers = config.attention_layers
        self.attention_type = self.attention_layers[layer_id]
        # Guard clause: only the two known variants are implemented.
        if self.attention_type not in ("global", "local"):
            raise NotImplementedError(
                "Only attn layer types 'global' and 'local' exist, but got `config.attention_layers`: "
                f"{config.attention_layers}. Select attn layer types from ['global', 'local'] only."
            )
        self.attention = SelfAttention(config, self.attention_type)

    def forward(
        self,
        hiddens,
        layer_past=None,
        attention_mask=None,
        head_mask=None,
        y_cache=False,
        output_attentions=False,
    ):
        # Pure pass-through to the configured attention implementation.
        return self.attention(
            hiddens,
            attention_mask=attention_mask,
            layer_past=layer_past,
            head_mask=head_mask,
            y_cache=y_cache,
            output_attentions=output_attentions,
        )
class MLP(qc.Module):
    """Position-wise feed-forward block: Linear -> activation -> Linear -> dropout.

    NOTE: this local class shadows the `MLP` imported from ..core.mlp at the
    top of the file; subsequent references resolve to this definition.
    """

    def __init__(self, d_ff, config):
        super().__init__()
        width = config.d_model
        self.c_fc = qc.Linear(width, d_ff)
        self.c_proj = qc.Linear(d_ff, width)
        self.act = qu.activation(config.act)
        self.drop = qc.Dropout(config.drop_resid)

    def forward(self, x):
        # Expand to d_ff, apply the nonlinearity, project back, then dropout.
        return self.drop(self.c_proj(self.act(self.c_fc(x))))
class Block(qc.Module):
    """One GPT-Neo transformer layer: pre-norm attention and pre-norm MLP,
    each wrapped in a residual connection."""

    def __init__(self, config, layer_id):
        super().__init__()
        width = config.d_model
        # Default feed-forward width is 4x the model width when unset.
        ffn_dim = 4 * width if config.d_ff is None else config.d_ff
        self.ln_1 = qc.LayerNorm(width, eps=config.eps)
        self.attn = Attention(config, layer_id)
        self.ln_2 = qc.LayerNorm(width, eps=config.eps)
        self.mlp = MLP(ffn_dim, config)

    def forward(
        self,
        hiddens,
        layer_past=None,
        attention_mask=None,
        head_mask=None,
        y_cache=False,
        output_attentions=False,
    ):
        """Returns (hiddens, [present,] [attn_weights,]) — `present` kept only
        when `y_cache` is truthy."""
        shortcut = hiddens
        attn_outputs = self.attn(
            self.ln_1(hiddens),
            layer_past=layer_past,
            attention_mask=attention_mask,
            head_mask=head_mask,
            y_cache=y_cache,
            output_attentions=output_attentions,
        )
        rest = attn_outputs[1:]  # (present, [attn_weights])
        hiddens = attn_outputs[0] + shortcut
        # Second residual branch around the feed-forward sub-layer.
        hiddens = hiddens + self.mlp(self.ln_2(hiddens))
        if y_cache:
            return (hiddens,) + rest
        # Without caching, drop the (None) present entry from the outputs.
        return (hiddens,) + rest[1:]
class Model(PreTrained):
    """Decoder-only transformer backbone: token + learned position embeddings,
    a stack of `Block`s, and a final LayerNorm. Returns hidden states plus
    optional per-layer caches / attentions."""

    def __init__(self, config):
        super().__init__(config)
        self.embed_dim = config.d_model
        # Token and absolute-position embedding tables.
        self.wte = qc.Embed(config.s_vocab, self.embed_dim)
        self.wpe = qc.Embed(config.n_pos, self.embed_dim)
        self.drop = qc.Dropout(config.drop_embed)
        self.h = nn.ModuleList([Block(config, layer_id=i) for i in range(config.n_lays)])
        self.ln_f = qc.LayerNorm(self.embed_dim, eps=config.eps)
        self.gradient_checkpointing = False

    def forward(
        self,
        input_ids=None,
        caches=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        y_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Embed inputs and run the block stack.

        Exactly one of `input_ids` / `inputs_embeds` must be given. `caches`
        holds per-layer pasts; when present, position ids continue after the
        cached length. Returns a tuple or `qo.BaseWithPast` per `return_dict`.
        """
        # Resolve per-call flags against the config defaults.
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        y_cache = y_cache if y_cache is not None else self.config.y_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            # Flatten any leading dims into a [batch, seq] view.
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])
        # No cache yet: one empty slot per layer; else read the cached length.
        if caches is None:
            past_length = 0
            caches = tuple([None] * len(self.h))
        else:
            past_length = caches[0][0].size(-2)
        # NOTE(review): duplicate of the `device` assignment above.
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if position_ids is None:
            # Positions continue after any cached prefix.
            position_ids = torch.arange(
                past_length, input_shape[-1] + past_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError("batch_size has to be defined and > 0")
            attention_mask = attention_mask.view(batch_size, -1)
            # Broadcast to [batch, 1, 1, seq] and turn mask 0s into large
            # negative additive biases (1s become 0).
            attention_mask = attention_mask[:, None, None, :]
            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
        head_mask = self.get_head_mask(head_mask, self.config.n_lays)
        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hiddens = inputs_embeds + position_embeds
        if token_type_ids is not None:
            # Token-type ids share the word-embedding table.
            token_type_embeds = self.wte(token_type_ids)
            hiddens = hiddens + token_type_embeds
        hiddens = self.drop(hiddens)
        output_shape = input_shape + (hiddens.size(-1),)
        presents = () if y_cache else None
        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, caches)):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hiddens,)
            if self.gradient_checkpointing and self.training:
                if y_cache:
                    log.warning(
                        "`y_cache=True` is incompatible with gradient checkpointing. Setting `y_cache=False`..."
                    )
                    y_cache = False

                def create_custom_forward(module):
                    # Closure captures y_cache/output_attentions so the
                    # checkpoint call only has to forward tensor args.
                    def custom_forward(*inputs):
                        return module(*inputs, y_cache, output_attentions)

                    return custom_forward

                # layer_past is passed as None under checkpointing.
                outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hiddens,
                    None,
                    attention_mask,
                    head_mask[i],
                )
            else:
                outputs = block(
                    hiddens,
                    layer_past=layer_past,
                    attention_mask=attention_mask,
                    head_mask=head_mask[i],
                    y_cache=y_cache,
                    output_attentions=output_attentions,
                )
            hiddens = outputs[0]
            if y_cache is True:
                presents = presents + (outputs[1],)
            if output_attentions:
                # Attention weights shift by one slot when a present is emitted.
                all_self_attentions = all_self_attentions + (outputs[2 if y_cache else 1],)
        hiddens = self.ln_f(hiddens)
        hiddens = hiddens.view(output_shape)
        if output_hidden_states:
            # Include the final (post-LayerNorm) states.
            all_hidden_states = all_hidden_states + (hiddens,)
        if not return_dict:
            return tuple(
                v
                for v in [hiddens, presents, all_hidden_states, all_self_attentions]
                if v is not None
            )
        return qo.BaseWithPast(
            y=hiddens,
            caches=presents,
            hiddens=all_hidden_states,
            attns=all_self_attentions,
        )
class ForCausal(PreTrained):
    """Causal language-modeling head (bias-free linear projection to the
    vocabulary) on top of the `Model` backbone."""

    def __init__(self, config):
        super().__init__(config)
        self.transformer = Model(config)
        self.lm_head = qc.Linear(config.d_model, config.s_vocab, bias=False)

    def forward(
        self,
        input_ids=None,
        caches=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        y_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Compute LM logits; when `labels` is given, also the shifted
        next-token cross-entropy loss."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.transformer(
            input_ids,
            caches=caches,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            y_cache=y_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hiddens = transformer_outputs[0]
        lm_logits = self.lm_head(hiddens)
        loss = None
        if labels is not None:
            # Loss is computed in float32, then both logits and loss are cast
            # back to the hidden-state dtype below.
            lm_logits = lm_logits.to(torch.float32)
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
            lm_logits = lm_logits.to(hiddens.dtype)
            loss = loss.to(hiddens.dtype)
        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return CausalLMOutputWithPast(
            loss=loss,
            logits=lm_logits,
            caches=transformer_outputs.caches,
            hiddens=transformer_outputs.hiddens,
            attns=transformer_outputs.attns,
        )

    @staticmethod
    def _reorder_cache(past, beam_idx):
        # Reorder every cached tensor along the batch dim so the cache follows
        # beam-search hypothesis reordering.
        return tuple(
            tuple(
                past_state.index_select(0, beam_idx.to(past_state.device))
                for past_state in layer_past
            )
            for layer_past in past
        )
class ForSeqClass(PreTrained):
    """Sequence-classification head: pools the representation at each row's
    last non-padding position (GPT-2 style) before projecting."""

    def __init__(self, **kw):
        super().__init__(**kw)
        self.get_cfg(kw)
        self.model = Model(**kw)
        self.proj = Classifier(**kw)

    forward = qf.forward_seq

    def post_proj(self, x):
        """Select, per batch row, the entry at the last non-pad position.

        NOTE(review): `x` is compared against `cfg.PAD` and also indexed for
        the result — presumably token ids and pooled values coincide here;
        verify against `qf.forward_seq`.
        """
        cfg = self.cfg
        b, _ = x.shape[:2]
        if cfg.PAD is None:
            # Without a pad id we cannot locate per-row sequence ends, so the
            # last position is used — only well-defined for a single sequence.
            # (Bug fix: this batch==1 restriction previously sat in the
            # PAD-defined branch, blocking batched padded inputs.)
            assert b == 1
            n = -1
        else:
            # Index of the last non-pad token in each row.
            n = torch.ne(x, cfg.PAD).sum(-1) - 1
        return x[torch.arange(b, device=self.device), n]
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,461
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/feature/perceiver.py
|
import numpy as np
from PIL import Image
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ImageFeatureExtractionMixin,
ImageInput,
is_torch_tensor,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class PerceiverFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
    """Feature extractor for Perceiver image inputs.

    Applies, each step optional via its flag: a padded center crop, a resize
    to `size`, and channel normalization with `image_mean`/`image_std`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_center_crop=True,
        crop_size=256,
        do_resize=True,
        size=224,
        resample=Image.BICUBIC,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kw,
    ):
        super().__init__(**kw)
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        # Default to the ImageNet channel statistics.
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def center_crop(self, image):
        """
        Crops `image` to *self.crop_size* using a center crop. Note that if the image is too small to be cropped to the
        size given, it will be padded (so the returned result has the size asked).
        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
                The image to resize.
        """
        if isinstance(image, Image.Image):
            image = self.to_numpy_array(image)
        # Assumes channels-first layout, i.e. (..., C, H, W) — height/width last.
        image_height, image_width = image.shape[-2:]
        # Crop side length scales the shorter image side by size/crop_size.
        padded_center_crop_size = (
            (self.size / (self.crop_size))
            * np.minimum(image_height, image_width).astype(np.float32)
        ).astype(np.int32)
        # Center the crop window; +1 rounds the offset up on odd differences.
        offset_height = ((image_height - padded_center_crop_size) + 1) // 2
        offset_width = ((image_width - padded_center_crop_size) + 1) // 2
        return image[
            :,
            offset_height : offset_height + padded_center_crop_size,
            offset_width : offset_width + padded_center_crop_size,
        ]

    def __call__(self, images: ImageInput, return_tensors=None, **kw):
        """
        Main method to prepare for the model one or several image(s).
        <Tip warning={true}>
        NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass
        PIL images.
        </Tip>
        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
                number of channels, H and W are image height and width.
            return_tensors (`str` or [`~utils.TensorType`], *optional*, defaults to `'np'`):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.
        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
            - **pixel_values** -- Pixel values to be fed to a model, of shape (batch_size, num_channels, height,
              width).
        """

        def _is_single_image(x):
            # One image: PIL image, NumPy array, or torch tensor.
            return isinstance(x, (Image.Image, np.ndarray)) or is_torch_tensor(x)

        # Input type checking for clearer error.
        valid_images = _is_single_image(images) or (
            isinstance(images, (list, tuple))
            and (len(images) == 0 or _is_single_image(images[0]))
        )
        if not valid_images:
            raise ValueError(
                "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example),"
                "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)."
            )
        # Bug fix: an empty list/tuple was accepted as valid above but then
        # crashed with IndexError on `images[0]`; treat it as an empty batch.
        is_batched = isinstance(images, (list, tuple)) and (
            len(images) == 0 or _is_single_image(images[0])
        )
        if not is_batched:
            images = [images]
        # Transformations (center cropping + resizing + normalization).
        if self.do_center_crop and self.crop_size is not None:
            images = [self.center_crop(image) for image in images]
        if self.do_resize and self.size is not None and self.resample is not None:
            images = [
                self.resize(image=image, size=self.size, resample=self.resample) for image in images
            ]
        if self.do_normalize:
            images = [
                self.normalize(image=image, mean=self.image_mean, std=self.image_std)
                for image in images
            ]
        # Return as BatchFeature.
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,462
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/convert/t5.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import numpy as np
import re
import tensorflow as tf
import torch
from argparse import ArgumentParser
from os.path import abspath
from transformers.utils import logging
from ..config.t5 import PreTrained
from ...models.t5 import ForCondGen
logging.set_verbosity_info()
log = logging.get_logger(__name__)
# Name fragments of TF checkpoint variables that carry optimizer state
# (Adam moments, step counters) rather than model weights; any variable
# whose scope path contains one of these is skipped during conversion.
_SKIP = [
    "adam_v",
    "adam_m",
    "AdamWeightDecayOptimizer",
    "AdamWeightDecayOptimizer_1",
    "global_step",
]
def load_src_weights(model, config, src_path):
    """Load T5 weights from a TensorFlow checkpoint into `model` in place.

    Reads every variable in the checkpoint at `src_path`, maps its
    slash-separated TF scope name onto the corresponding PyTorch
    submodule/parameter of `model`, and copies the numpy array over
    (transposing kernels so TF's [in, out] layout matches PyTorch).

    Args:
        model: target PyTorch T5 model; its parameters are overwritten.
        config: model configuration (currently unused; kept so the call
            signature matches the other converters in this package).
        src_path: path to the TF checkpoint.

    Returns:
        The same `model` instance with the checkpoint weights loaded.
    """
    src_path = abspath(src_path)
    log.info(f"Loading from: {src_path}")
    xs = tf.train.list_variables(src_path)
    names = []
    tf_weights = {}
    for name, shape in xs:
        log.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(src_path, name)
        names.append(name)
        tf_weights[name] = array
    for txt_name in names:
        name = txt_name.split("/")
        # Optimizer state (Adam moments, global step) has no PyTorch
        # counterpart; drop it from the bookkeeping dict and move on.
        if any(n in _SKIP for n in name):
            log.info(f"Skipping {'/'.join(name)}")
            tf_weights.pop(txt_name, None)
            continue
        if "_slot_" in name[-1]:
            log.info(f"Skipping {'/'.join(name)}")
            tf_weights.pop(txt_name, None)
            continue
        p = model
        array = tf_weights[txt_name]
        for m_name in name:
            # Split a trailing numeric suffix, e.g. "block_3" -> ["block", "3", ""],
            # so the number can index into a module list below.
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scopes = re.split(r"_(\d+)", m_name)
            else:
                scopes = [m_name]
            if scopes[0] in ["kernel", "scale", "embedding"]:
                p = getattr(p, "weight")
            elif scopes[0] == "self_attention":
                p = getattr(p, "layer")
                p = p[0]
            elif scopes[0] == "enc_dec_attention":
                p = getattr(p, "layer")
                p = p[1]
            elif scopes[0] == "dense_relu_dense":
                p = getattr(p, "layer")
                p = p[2]
            elif scopes[0] == "rms_norm":
                if hasattr(p, "layer_norm"):
                    p = getattr(p, "layer_norm")
                elif hasattr(p, "final_layer_norm"):
                    p = getattr(p, "final_layer_norm")
            # NOTE: a former `elif scopes[0] == "scale"` branch was removed
            # here — it was unreachable dead code, since "scale" is already
            # matched by the ["kernel", "scale", "embedding"] branch above.
            elif scopes[0] == "output_bias" or scopes[0] == "beta":
                p = getattr(p, "bias")
            elif scopes[0] == "squad":
                p = getattr(p, "classifier")
            elif scopes[0] == "decoder" and name[1] == "logits":
                continue
            elif scopes[0] == "logits":
                p = getattr(p, "lm_head")
            elif scopes[0] == "wi" and len(scopes) > 1 and scopes[1].isdigit():
                # wi_0 / wi_1 are distinct attributes, not a module list,
                # so skip the generic numeric indexing below.
                p = getattr(p, f"wi_{scopes[1]}")
                continue
            else:
                try:
                    p = getattr(p, scopes[0])
                except AttributeError:
                    log.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scopes) >= 2:
                # Numeric suffix indexes into a module list (e.g. block_3).
                p = p[int(scopes[1])]
        if scopes[0] not in ["kernel", "scale", "embedding"]:
            p = getattr(p, "weight")
        if scopes[0] != "embedding":
            log.info(f"Transposing numpy weight of shape {array.shape} for {name}")
            array = np.transpose(array)
        assert p.shape == array.shape
        log.info(f"Initialize PyTorch weight {name}")
        p.data = torch.from_numpy(array.astype(np.float32))
        tf_weights.pop(txt_name, None)
    log.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
    return model
def to_pytorch(src_path, cfg_path, save_path, is_trivia=False):
    """Convert a TF BigBird checkpoint into a PyTorch model and save it.

    src_path: path to the TF checkpoint (or saved model for TriviaQA).
    cfg_path: JSON file describing the model configuration.
    save_path: directory the converted PyTorch model is written to.
    is_trivia: when True, build the QA head (``ForQA``) and load the
        checkpoint via the saved-model path in ``load_src_weights``.
    """
    cfg = PreTrained.from_json_file(cfg_path)
    print(f"Building from config: {cfg}")
    # Fix: the original instantiated `ForCondGen`, a name this module never
    # imports (only ForPreTraining and ForQA are available for BigBird).
    m = ForQA(cfg) if is_trivia else ForPreTraining(cfg)
    # Fix: the original called load_src_weights(m, cfg, src_path), which put
    # the config object into the src_path parameter slot.
    load_src_weights(m, src_path, is_trivia=is_trivia)
    print(f"Saving to: {save_path}")
    m.save_pretrained(save_path)
if __name__ == "__main__":
    # CLI entry point: convert a source checkpoint given its config and a
    # destination directory; all three paths are mandatory.
    parser = ArgumentParser()
    for flag in ("--src_path", "--cfg_path", "--save_path"):
        parser.add_argument(flag, default=None, type=str, required=True)
    args = parser.parse_args()
    to_pytorch(args.src_path, args.cfg_path, args.save_path)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,463
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/config/deberta2.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from ... import core as qc
class PreTrained(qc.PreTrained):
    """Base/config class for DeBERTa-v2 models.

    ``hs`` declares the hyperparameter schema: no required keys, and the
    defaults below (presumably matching a published microsoft/deberta-v2-*
    checkpoint — TODO confirm which size) are used unless overridden.
    """

    hs = qc.Hypers(
        [],  # no mandatory hyperparameters; everything has a default
        dict(
            act="gelu",
            d_ff=6144,  # feed-forward inner dimension
            d_hidden=1536,  # hidden/embedding dimension
            drop_attn=0.1,  # attention-probabilities dropout
            drop=0.1,  # hidden-state dropout
            eps=1e-7,  # layer-norm epsilon
            init_range=0.02,  # stddev for weight init (see _init_weights)
            max_relative_positions=-1,
            model_type="deberta-v2",
            n_heads=24,
            n_lays=24,
            n_pos=512,  # max absolute positions
            n_typ=0,  # token-type vocabulary size
            PAD=0,  # padding token id
            pooler_dropout=0,
            pooler_hidden_act="gelu",
            pos_att_type=None,
            position_biased_input=True,
            relative_attention=False,
            s_vocab=128100,  # vocabulary size
            grad_checkpoint=True,
        ),
    )

    def _init_weights(self, module):
        # Initialize Linear/Embed weights from N(0, init_range); zero the
        # Linear bias and the padding embedding row when present.
        if isinstance(module, qc.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.init_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, qc.Embed):
            module.weight.data.normal_(mean=0.0, std=self.config.init_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        # Toggle gradient checkpointing on the encoder.
        # NOTE(review): `DebertaV2Encoder` is neither defined nor imported in
        # this file — calling this method raises NameError as written. Also,
        # the hyperparameter above is named `grad_checkpoint` while this sets
        # `gradient_checkpointing`; verify both against the model module.
        if isinstance(module, DebertaV2Encoder):
            module.gradient_checkpointing = value
# Mapping from pretrained DeBERTa-v2 checkpoint identifiers to the URLs of
# their hosted configuration files on the Hugging Face hub.
MAP = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge-mnli": "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json",
}
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,464
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/models/nanogpt.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import math
import inspect
from dataclasses import dataclass
import torch
import torch.nn as nn
from torch.nn import functional as F
from .. import core as qc
from ..core import utils as qu
class MLP(nn.Module):
    """Position-wise feed-forward block: expand 4x, GELU, project back, dropout."""

    def __init__(self, cfg):
        super().__init__()
        d = cfg.d_model
        self.c_fc = nn.Linear(d, 4 * d, bias=cfg.bias)
        self.proj = nn.Linear(4 * d, d, bias=cfg.bias)
        self.drop = nn.Dropout(cfg.drop)
        self.act = qu.activation("gelu_new")

    def forward(self, x):
        y = self.act(self.c_fc(x))
        return self.drop(self.proj(y))
class Block(nn.Module):
    """Pre-norm transformer block: residual attention, then residual MLP."""

    def __init__(self, cfg):
        super().__init__()
        self.ln_1 = qc.LayerNorm(cfg.d_model, bias=cfg.bias)
        # NOTE(review): Attention.__init__ is (is_cross=False, lay_i=None, ps={},
        # hs=[], **kw), so `cfg` here binds positionally to `is_cross` — confirm
        # this is the intended calling convention.
        self.attn = Attention(cfg)
        self.ln_2 = qc.LayerNorm(cfg.d_model, bias=cfg.bias)
        self.mlp = MLP(cfg)

    def forward(self, x):
        h = x + self.attn(self.ln_1(x))
        return h + self.mlp(self.ln_2(h))
class Attention(qc.Module):
    """Multi-head causal self-attention with an optional fused (flash) path.

    Uses torch.nn.functional.scaled_dot_product_attention when the running
    torch build provides it; otherwise falls back to an explicit masked
    softmax over a cached lower-triangular mask buffer.
    """

    hs = qc.Hypers({"d_model", "drop", "n_heads", "n_pos"})

    def __init__(self, is_cross=False, lay_i=None, ps=None, hs=None, **kw):
        # Fix: the previous signature used mutable defaults (ps={}, hs=[]),
        # which are evaluated once and shared across all instances; default to
        # None and substitute fresh containers per call instead.
        if ps is None:
            ps = {}
        if hs is None:
            hs = []
        super().__init__(ps, [self.hs] + hs, **kw)
        cfg = self.get_cfg(kw)
        d, h = cfg.d_model, cfg.n_heads
        assert d % h == 0
        # NOTE(review): cfg.bias is read here but "bias" is not declared in hs —
        # confirm callers supply it through ps/kw.
        self.attn = nn.Linear(d, 3 * d, bias=cfg.bias)  # fused q,k,v projection
        self.proj = nn.Linear(d, d, bias=cfg.bias)
        self.drop_attn = nn.Dropout(cfg.drop)
        self.drop = nn.Dropout(cfg.drop)
        # Prefer the fused kernel when this torch build exposes it.
        self.flash = hasattr(torch.nn.functional, "scaled_dot_product_attention")
        if not self.flash:
            p, t = cfg.n_pos, torch.bool
            # Causal mask buffer of shape (1, 1, n_pos, n_pos).
            self.register_buffer("bias", torch.tril(torch.ones((p, p), dtype=t)).view(1, 1, p, p))

    def forward(self, x):
        cfg = self.cfg
        B, T, C = x.size()  # batch size, sequence length, embedding dimensionality (n_hidden)
        q, k, v = self.attn(x).split(cfg.d_model, dim=2)
        k = k.view(B, T, cfg.n_heads, C // cfg.n_heads).transpose(1, 2)
        q = q.view(B, T, cfg.n_heads, C // cfg.n_heads).transpose(1, 2)
        v = v.view(B, T, cfg.n_heads, C // cfg.n_heads).transpose(1, 2)
        if self.flash:
            y = torch.nn.functional.scaled_dot_product_attention(
                q,
                k,
                v,
                attn_mask=None,
                dropout_p=cfg.drop if self.training else 0,
                is_causal=True,
            )
        else:
            att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
            att = att.masked_fill(self.bias[:, :, :T, :T] == 0, float("-inf"))
            att = F.softmax(att, dim=-1)
            att = self.drop_attn(att)
            y = att @ v  # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
        y = y.transpose(1, 2).contiguous().view(B, T, C)
        y = self.drop(self.proj(y))
        return y
class GPT(nn.Module):
    """Minimal GPT-2 style causal language model (nanoGPT port).

    Holds token/position embeddings, a stack of Blocks, a final layer norm and
    a (weight-tied) language-model head.
    """

    def __init__(self, cfg):
        super().__init__()
        assert cfg.s_vocab is not None
        assert cfg.n_pos is not None
        self.cfg = cfg
        self.transformer = nn.ModuleDict(
            dict(
                wte=nn.Embedding(cfg.s_vocab, cfg.d_model),
                wpe=nn.Embedding(cfg.n_pos, cfg.d_model),
                drop=nn.Dropout(cfg.drop),
                h=nn.ModuleList([Block(cfg) for _ in range(cfg.n_layer)]),
                # Fix: was bare `LayerNorm`, which is not defined in this
                # module (a NameError at construction); Block uses
                # qc.LayerNorm, so use the same class here for consistency.
                ln_f=qc.LayerNorm(cfg.d_model, bias=cfg.bias),
            )
        )
        self.lm_head = nn.Linear(cfg.d_model, cfg.s_vocab, bias=False)
        # with weight tying when using torch.compile() some warnings get generated:
        # "UserWarning: functional_call was passed multiple values for tied weights.
        # This behavior is deprecated and will be an error in future versions"
        # not 100% sure what this is, so far seems to be harmless. TODO investigate
        self.transformer.wte.weight = (
            self.lm_head.weight
        )  # https://paperswithcode.com/method/weight-tying
        # init all weights
        self.apply(self._init_weights)
        # apply special scaled init to the residual projections, per GPT-2 paper
        for pn, p in self.named_parameters():
            if pn.endswith("proj.weight"):
                torch.nn.init.normal_(p, mean=0.0, std=0.02 / math.sqrt(2 * cfg.n_layer))
        # report number of parameters
        print("number of parameters: %.2fM" % (self.get_num_params() / 1e6,))

    def get_num_params(self, non_embedding=True):
        """
        Return the number of parameters in the model.
        For non-embedding count (default), the position embeddings get subtracted.
        The token embeddings would too, except due to the parameter sharing these
        params are actually used as weights in the final layer, so we include them.
        """
        n_params = sum(p.numel() for p in self.parameters())
        if non_embedding:
            n_params -= self.transformer.wpe.weight.numel()
        return n_params

    def _init_weights(self, module):
        """Initialize Linear/Embedding weights with N(0, 0.02); zero Linear biases."""
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def forward(self, idx, targets=None):
        """Run the model on token indices idx (b, t); if targets is given, also
        return the cross-entropy loss (ignore_index=-1), else loss is None."""
        device = idx.device
        b, t = idx.size()
        assert (
            t <= self.cfg.n_pos
        ), f"Cannot forward sequence of length {t}, block size is only {self.cfg.n_pos}"
        pos = torch.arange(0, t, dtype=torch.long, device=device).unsqueeze(0)  # shape (1, t)
        # forward the GPT model itself
        tok_emb = self.transformer.wte(idx)  # token embeddings of shape (b, t, n_hidden)
        pos_emb = self.transformer.wpe(pos)  # position embeddings of shape (1, t, n_hidden)
        x = self.transformer.drop(tok_emb + pos_emb)
        for block in self.transformer.h:
            x = block(x)
        x = self.transformer.ln_f(x)
        if targets is not None:
            # if we are given some desired targets also calculate the loss
            logits = self.lm_head(x)
            loss = F.cross_entropy(
                logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1
            )
        else:
            # inference-time mini-optimization: only forward the lm_head on the very last position
            logits = self.lm_head(x[:, [-1], :])  # note: using list [-1] to preserve the time dim
            loss = None
        return logits, loss

    def crop_n_pos(self, n_pos):
        """Model surgery to shrink the block size (position embedding table and
        any cached causal-mask buffers) down to n_pos."""
        # e.g. we may load the GPT2 pretrained model checkpoint (block size 1024)
        # but want to use a smaller block size for some smaller, simpler model
        assert n_pos <= self.cfg.n_pos
        self.cfg.n_pos = n_pos
        self.transformer.wpe.weight = nn.Parameter(self.transformer.wpe.weight[:n_pos])
        for block in self.transformer.h:
            # the "bias" mask buffer only exists on the non-flash attention path
            if hasattr(block.attn, "bias"):
                block.attn.bias = block.attn.bias[:, :, :n_pos, :n_pos]

    @classmethod
    def from_pretrained(cls, model_type, override_args=None):
        """Build a GPT and copy weights from a huggingface GPT-2 checkpoint."""
        assert model_type in {"gpt2", "gpt2-medium", "gpt2-large", "gpt2-xl"}
        override_args = override_args or {}  # default to empty dict
        # only dropout can be overridden see more notes below
        assert all(k == "dropout" for k in override_args)
        from transformers import GPT2LMHeadModel

        print("loading weights from pretrained gpt: %s" % model_type)
        # n_layer, n_head and n_hidden are determined from model_type
        cfg_args = {
            "gpt2": dict(n_layer=12, n_head=12, n_hidden=768),  # 124M params
            "gpt2-medium": dict(n_layer=24, n_head=16, n_hidden=1024),  # 350M params
            "gpt2-large": dict(n_layer=36, n_head=20, n_hidden=1280),  # 774M params
            "gpt2-xl": dict(n_layer=48, n_head=25, n_hidden=1600),  # 1558M params
        }[model_type]
        print("forcing s_vocab=50257, n_pos=1024, bias=True")
        cfg_args["s_vocab"] = 50257  # always 50257 for GPT model checkpoints
        cfg_args["n_pos"] = 1024  # always 1024 for GPT model checkpoints
        cfg_args["bias"] = True  # always True for GPT model checkpoints
        # we can override the dropout rate, if desired
        if "dropout" in override_args:
            print(f"overriding dropout rate to {override_args['dropout']}")
            cfg_args["dropout"] = override_args["dropout"]
        # create a from-scratch initialized minGPT model
        # NOTE(review): GPTcfg is not defined anywhere in this module — confirm
        # the intended config class (upstream nanoGPT used a GPTConfig dataclass).
        cfg = GPTcfg(**cfg_args)
        model = GPT(cfg)
        sd = model.state_dict()
        sd_keys = sd.keys()
        sd_keys = [
            k for k in sd_keys if not k.endswith(".attn.bias")
        ]  # discard this mask / buffer, not a param
        # init a huggingface/transformers model
        model_hf = GPT2LMHeadModel.from_pretrained(model_type)
        sd_hf = model_hf.state_dict()
        # copy while ensuring all of the parameters are aligned and match in names and shapes
        sd_keys_hf = sd_hf.keys()
        sd_keys_hf = [
            k for k in sd_keys_hf if not k.endswith(".attn.bias_m")
        ]  # ignore these, just a buffer
        sd_keys_hf = [
            k for k in sd_keys_hf if not k.endswith(".attn.bias")
        ]  # same, just the mask (buffer)
        transposed = [
            "attn.attn.weight",
            "attn.proj.weight",
            "mlp.c_fc.weight",
            "mlp.proj.weight",
        ]
        # basically the openai checkpoints use a "Conv1D" module, but we only want to use a vanilla Linear
        # this means that we have to transpose these weights when we import them
        assert len(sd_keys_hf) == len(
            sd_keys
        ), f"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}"
        for k in sd_keys_hf:
            if any(k.endswith(w) for w in transposed):
                # special treatment for the Conv1D weights we need to transpose
                assert sd_hf[k].shape[::-1] == sd[k].shape
                with torch.no_grad():
                    sd[k].copy_(sd_hf[k].t())
            else:
                # vanilla copy over the other parameters
                assert sd_hf[k].shape == sd[k].shape
                with torch.no_grad():
                    sd[k].copy_(sd_hf[k])
        return model

    def cfgure_optimizers(self, weight_decay, learning_rate, betas, device_type):
        """
        This long function is unfortunately doing something very simple and is being very defensive:
        We are separating out all parameters of the model into two buckets: those that will experience
        weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
        We are then returning the PyTorch optimizer object.
        """
        # separate out all parameters to those that will and won't experience regularizing weight decay
        decay = set()
        no_decay = set()
        whitelist_weight_modules = (torch.nn.Linear,)
        # Fix: was bare `LayerNorm` (undefined in this module); use qc.LayerNorm
        # to match the class this model actually instantiates.
        blacklist_weight_modules = (torch.nn.LayerNorm, qc.LayerNorm, torch.nn.Embedding)
        for mn, m in self.named_modules():
            for pn, p in m.named_parameters():
                fpn = "%s.%s" % (mn, pn) if mn else pn  # full param name
                # random note: because named_modules and named_parameters are recursive
                # we will see the same tensors p many many times. but doing it this way
                # allows us to know which parent module any tensor p belongs to...
                if pn.endswith("bias"):
                    # all biases will not be decayed
                    no_decay.add(fpn)
                elif pn.endswith("weight") and isinstance(m, whitelist_weight_modules):
                    # weights of whitelist modules will be weight decayed
                    decay.add(fpn)
                elif pn.endswith("weight") and isinstance(m, blacklist_weight_modules):
                    # weights of blacklist modules will NOT be weight decayed
                    no_decay.add(fpn)
        # subtle: 'transformer.wte.weight' and 'lm_head.weight' are tied, so they
        # will appear in the no_decay and decay sets respectively after the above.
        # In addition, because named_parameters() doesn't return duplicates, it
        # will only return the first occurence, key'd by 'transformer.wte.weight', below.
        # so let's manually remove 'lm_head.weight' from decay set. This will include
        # this tensor into optimization via transformer.wte.weight only, and not decayed.
        decay.remove("lm_head.weight")
        # validate that we considered every parameter
        param_dict = {pn: p for pn, p in self.named_parameters()}
        inter_params = decay & no_decay
        union_params = decay | no_decay
        assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (
            str(inter_params),
        )
        assert (
            len(param_dict.keys() - union_params) == 0
        ), "parameters %s were not separated into either decay/no_decay set!" % (
            str(param_dict.keys() - union_params),
        )
        # create the pytorch optimizer object
        optim_groups = [
            {
                "params": [param_dict[pn] for pn in sorted(list(decay))],
                "weight_decay": weight_decay,
            },
            {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
        ]
        # new PyTorch nightly has a new 'fused' option for AdamW that is much faster
        use_fused = (device_type == "cuda") and (
            "fused" in inspect.signature(torch.optim.AdamW).parameters
        )
        print(f"using fused AdamW: {use_fused}")
        extra_args = dict(fused=True) if use_fused else dict()
        optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args)
        return optimizer

    def estimate_mfu(self, fwdbwd_per_iter, dt):
        """estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS"""
        # first estimate the number of flops we do per iteration.
        # see PaLM paper Appendix B as ref: https://arxiv.org/abs/2204.02311
        N = self.get_num_params()
        cfg = self.cfg
        L, H, Q, T = cfg.n_layer, cfg.n_heads, cfg.d_model // cfg.n_heads, cfg.n_pos
        flops_per_token = 6 * N + 12 * L * H * Q * T
        flops_per_fwdbwd = flops_per_token * T
        flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter
        # express our flops throughput as ratio of A100 bfloat16 peak flops
        flops_achieved = flops_per_iter * (1.0 / dt)  # per second
        flops_promised = 312e12  # A100 GPU bfloat16 peak flops is 312 TFLOPS
        mfu = flops_achieved / flops_promised
        return mfu

    @torch.no_grad()
    def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
        """
        Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete
        the sequence max_new_tokens times, feeding the predictions back into the model each time.
        Most likely you'll want to make sure to be in model.eval() mode of operation for this.
        """
        for _ in range(max_new_tokens):
            # if the sequence context is growing too long we must crop it at n_pos
            idx_cond = idx if idx.size(1) <= self.cfg.n_pos else idx[:, -self.cfg.n_pos :]
            # forward the model to get the logits for the index in the sequence
            logits, _ = self(idx_cond)
            # pluck the logits at the final step and scale by desired temperature
            logits = logits[:, -1, :] / temperature
            # optionally crop the logits to only the top k options
            if top_k is not None:
                v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                logits[logits < v[:, [-1]]] = -float("Inf")
            # apply softmax to convert logits to (normalized) probabilities
            probs = F.softmax(logits, dim=-1)
            # sample from the distribution
            idx_next = torch.multinomial(probs, num_samples=1)
            # append sampled index to the running sequence and continue
            idx = torch.cat((idx, idx_next), dim=1)
        return idx
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,465
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/metric/sacrebleu.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import sacrebleu as scb
import datasets as ds
class Sacrebleu(ds.Metric):
    """Thin `datasets.Metric` wrapper around sacrebleu's corpus BLEU."""

    def _info(self):
        # Each prediction is a single string; each references entry is the list
        # of alternative reference translations for that prediction.
        return ds.MetricInfo(
            description="",
            citation="",
            homepage="",
            inputs_description="",
            features=ds.Features(
                {
                    "predictions": ds.Value("string", id="sequence"),
                    "references": ds.Sequence(ds.Value("string", id="sequence"), id="references"),
                }
            ),
        )

    def _compute(
        self,
        preds,
        refs,
        smooth_method="exp",
        smooth_value=None,
        force=False,
        lowercase=False,
        tokenize=None,
        use_effective_order=False,
    ):
        """Compute corpus BLEU for `preds` against `refs`.

        preds: list of prediction strings.
        refs: list (one entry per prediction) of lists of reference strings;
            every prediction must have the same number of references.
        Remaining keywords are forwarded to `sacrebleu.corpus_bleu`.
        Raises ValueError when the reference counts are uneven.
        """
        # NOTE: the original code shadowed `refs` with its own loop variable in
        # the check and the transposition below; renamed for clarity (same logic).
        references_per_prediction = len(refs[0])
        if any(len(ref_list) != references_per_prediction for ref_list in refs):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # sacrebleu wants the references transposed: one list per reference
        # "column", each aligned index-by-index with `preds`.
        transformed_references = [
            [ref_list[i] for ref_list in refs] for i in range(references_per_prediction)
        ]
        y = scb.corpus_bleu(
            preds,
            transformed_references,
            smooth_method=smooth_method,
            smooth_value=smooth_value,
            force=force,
            lowercase=lowercase,
            use_effective_order=use_effective_order,
            # only pass `tokenize` through when explicitly set, so sacrebleu's
            # own default tokenizer applies otherwise
            **(dict(tokenize=tokenize) if tokenize else {}),
        )
        return {
            "score": y.score,
            "counts": y.counts,
            "totals": y.totals,
            "precisions": y.precisions,
            "bp": y.bp,
            "sys_len": y.sys_len,
            "ref_len": y.ref_len,
        }
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,466
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/tokens/fsmt.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import json
import logging
import os
import re
import unicodedata

import sacremoses as sm

from ...tokens.utils import PreTrainedTokenizer
# Filenames (relative to a model dir) of the three vocabulary artifacts:
# separate source/target vocab json files plus the shared BPE merges file.
VOCAB_FS = {
    "src_vocab_file": "vocab-src.json",
    "tgt_vocab_file": "vocab-tgt.json",
    "merges_file": "merges.txt",
}
# Download URLs for each pretrained checkpoint's vocabulary files.
VOCAB_MAP = {
    "src_vocab_file": {
        "stas/tiny-wmt19-en-de": "https://huggingface.co/stas/tiny-wmt19-en-de/resolve/main/vocab-src.json"
    },
    "tgt_vocab_file": {
        "stas/tiny-wmt19-en-de": "https://huggingface.co/stas/tiny-wmt19-en-de/resolve/main/vocab-tgt.json"
    },
    "merges_file": {
        "stas/tiny-wmt19-en-de": "https://huggingface.co/stas/tiny-wmt19-en-de/resolve/main/merges.txt"
    },
}
# Maximum input length (in tokens) per pretrained checkpoint.
INPUT_CAPS = {"stas/tiny-wmt19-en-de": 1024}
# Extra constructor kwargs applied per pretrained checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    "stas/tiny-wmt19-en-de": {
        "langs": ["en", "de"],
        "model_max_length": 1024,
        "special_tokens_map_file": None,
        "full_tokenizer_file": None,
    }
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word*.

    *word* is a sequence of symbols (variable-length strings), as used by
    the BPE merge loop.

    Robustness fix: an empty or single-symbol *word* now yields an empty
    set instead of raising IndexError on ``word[0]``.
    """
    return set(zip(word, word[1:]))
def replace_unicode_punct(text):
    """Normalize CJK/Unicode punctuation to ASCII equivalents.

    Port of Moses' ``replace-unicode-punctuation.perl``.

    NOTE(review): several single-char replacements below look like no-ops
    (e.g. "1" -> "1", "," -> ","); in the upstream Moses script these are
    *fullwidth* forms (U+FF01..U+FF5E) that appear to have been
    NFKC-normalized to ASCII at some point — verify against the original
    script before relying on them.
    """
    text = text.replace(",", ",")
    text = re.sub(r"。\s*", ". ", text)
    text = text.replace("、", ",")
    text = text.replace("”", '"')
    text = text.replace("“", '"')
    text = text.replace("∶", ":")
    text = text.replace(":", ":")
    text = text.replace("?", "?")
    text = text.replace("《", '"')
    text = text.replace("》", '"')
    text = text.replace(")", ")")
    text = text.replace("!", "!")
    text = text.replace("(", "(")
    text = text.replace(";", ";")
    text = text.replace("1", "1")
    text = text.replace("」", '"')
    text = text.replace("「", '"')
    text = text.replace("0", "0")
    text = text.replace("3", "3")
    text = text.replace("2", "2")
    text = text.replace("5", "5")
    text = text.replace("6", "6")
    text = text.replace("9", "9")
    text = text.replace("7", "7")
    text = text.replace("8", "8")
    text = text.replace("4", "4")
    # BUGFIX: the pattern was r".\s*" — an unescaped "." matches *any*
    # character, which would replace every character of the input with ". ".
    # Escape it so only a literal full stop (plus trailing whitespace) is
    # normalized to ". ".
    text = re.sub(r"\.\s*", ". ", text)
    text = text.replace("~", "~")
    text = text.replace("’", "'")
    text = text.replace("…", "...")
    text = text.replace("━", "-")
    text = text.replace("〈", "<")
    text = text.replace("〉", ">")
    text = text.replace("【", "[")
    text = text.replace("】", "]")
    text = text.replace("%", "%")
    return text
def remove_non_printing_char(text):
    """Drop every character whose Unicode category starts with "C"
    (control, format, surrogate, private-use, unassigned)."""
    return "".join(
        char for char in text if not unicodedata.category(char).startswith("C")
    )
class Tokenizer(PreTrainedTokenizer):
    """FSMT tokenizer: Moses preprocessing (punct-norm + tokenize) followed
    by BPE, with two separate vocabularies.

    ``encoder`` maps source-language tokens to ids; ``decoder`` maps ids
    back to target-language tokens.
    """

    vocab_fs = VOCAB_FS
    vocab_map = VOCAB_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    input_caps = INPUT_CAPS
    model_input_names = ["input_ids", "mask"]

    def __init__(
        self,
        langs=None,
        src_vocab_file=None,
        tgt_vocab_file=None,
        merges_file=None,
        do_lower_case=False,
        unk="<unk>",
        bos="<s>",
        sep="</s>",
        pad="<pad>",
        **kw,
    ):
        """Load the source/target vocabularies and the BPE merge table.

        Raises ValueError unless *langs* is a two-element [src, tgt] list.
        """
        super().__init__(
            langs=langs,
            src_vocab_file=src_vocab_file,
            tgt_vocab_file=tgt_vocab_file,
            merges_file=merges_file,
            do_lower_case=do_lower_case,
            unk=unk,
            bos=bos,
            sep=sep,
            pad=pad,
            **kw,
        )
        self.src_vocab_file = src_vocab_file
        self.tgt_vocab_file = tgt_vocab_file
        self.merges_file = merges_file
        self.do_lower_case = do_lower_case
        # Per-language caches: sacremoses helpers are expensive to construct.
        self.cache_moses_punct_normalizer = dict()
        self.cache_moses_tokenizer = dict()
        self.cache_moses_detokenizer = dict()
        if langs and len(langs) == 2:
            self.src_lang, self.tgt_lang = langs
        else:
            raise ValueError(
                f"arg `langs` needs to be a list of 2 langs, e.g. ['en', 'ru'], but got {langs}. "
                "Usually that means that tokenizer can't find a mapping for the given model path "
                "in VOCAB_MAP, and other maps of this tokenizer."
            )
        with open(src_vocab_file, encoding="utf-8") as src_vocab_handle:
            self.encoder = json.load(src_vocab_handle)
        with open(tgt_vocab_file, encoding="utf-8") as tgt_vocab_handle:
            tgt_vocab = json.load(tgt_vocab_handle)
            self.decoder = {v: k for k, v in tgt_vocab.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Memoization of bpe() results, keyed by raw token.
        self.cache = {}

    def get_vocab(self):
        """Return the source-side vocabulary (token -> id)."""
        return self.get_src_vocab()

    @property
    def s_vocab(self):
        """Size of the source-side vocabulary."""
        return self.s_src_vocab

    def moses_punct_norm(self, text, lang):
        """Moses punctuation normalization, with a per-language cache."""
        if lang not in self.cache_moses_punct_normalizer:
            punct_normalizer = sm.MosesPunctNormalizer(lang=lang)
            self.cache_moses_punct_normalizer[lang] = punct_normalizer
        return self.cache_moses_punct_normalizer[lang].normalize(text)

    def moses_tokenize(self, text, lang):
        """Moses word tokenization, with a per-language cache."""
        if lang not in self.cache_moses_tokenizer:
            moses_tokenizer = sm.MosesTokenizer(lang=lang)
            self.cache_moses_tokenizer[lang] = moses_tokenizer
        return self.cache_moses_tokenizer[lang].tokenize(
            text, aggressive_dash_splits=True, return_str=False, escape=True
        )

    def moses_detokenize(self, tokens, lang):
        """Moses detokenization, with a per-language cache.

        BUGFIX: previously this checked ``cache_moses_tokenizer`` while
        storing into ``cache_moses_detokenizer`` (so the cache was never
        hit, and a KeyError was possible when src and tgt langs coincide),
        and always built the detokenizer for ``self.tgt_lang`` instead of
        the requested *lang*.
        """
        if lang not in self.cache_moses_detokenizer:
            self.cache_moses_detokenizer[lang] = sm.MosesDetokenizer(lang=lang)
        return self.cache_moses_detokenizer[lang].detokenize(tokens)

    def moses_pipeline(self, text, lang):
        """Unicode-punct fixup -> Moses punct-norm -> strip non-printing chars."""
        text = replace_unicode_punct(text)
        text = self.moses_punct_norm(text, lang)
        text = remove_non_printing_char(text)
        return text

    @property
    def s_src_vocab(self):
        """Size of the source-side vocabulary."""
        return len(self.encoder)

    @property
    def s_tgt_vocab(self):
        """Size of the target-side vocabulary."""
        return len(self.decoder)

    def get_src_vocab(self):
        """Source vocabulary (token -> id) including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def get_tgt_vocab(self):
        """Target vocabulary (id -> token) including added tokens."""
        return dict(self.decoder, **self.added_tokens_decoder)

    def bpe(self, token):
        """Apply byte-pair merges to *token*.

        Returns a space-joined string of subword units; the final unit
        carries a "</w>" end-of-word marker. Results are memoized in
        ``self.cache``.
        """
        # Check the memo cache before doing any work (previously the word
        # tuple was built even on a cache hit).
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + "</w>",)
        pairs = get_pairs(word)
        if not pairs:
            return token + "</w>"
        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n </w>":
            # Keep a literal newline token intact.
            word = "\n</w>"
        self.cache[token] = word
        return word

    def _tokenize(self, text, lang="en", bypass_tokenizer=False):
        """Split *text* into BPE subword tokens.

        NOTE(review): the *lang* argument is ignored — tokenization always
        uses ``self.src_lang`` (kept as-is; confirm no caller relies on
        passing a different lang).
        """
        lang = self.src_lang
        if self.do_lower_case:
            text = text.lower()
        if bypass_tokenizer:
            # Caller has already tokenized; just whitespace-split.
            text = text.split()
        else:
            text = self.moses_pipeline(text, lang=lang)
            text = self.moses_tokenize(text, lang=lang)
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(self.bpe(token).split(" "))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Map a source-side token to its id (unk id if unknown)."""
        return self.encoder.get(token, self.encoder.get(self.unk))

    def _convert_id_to_token(self, index):
        """Map an id to its target-side token (unk token if unknown)."""
        return self.decoder.get(index, self.unk)

    def convert_tokens_to_string(self, tokens):
        """Undo BPE ("</w>" markers become spaces) and Moses-detokenize."""
        tokens = [t.replace(" ", "").replace("</w>", " ") for t in tokens]
        tokens = "".join(tokens).split()
        text = self.moses_detokenize(tokens, self.tgt_lang)
        return text

    def build_inputs_with_special_tokens(self, toks_0, toks_1=None):
        """Append SEP after each sequence: ``A </s>`` or ``A </s> B </s>``."""
        sep = [self.SEP]
        if toks_1 is None:
            return toks_0 + sep
        return toks_0 + sep + toks_1 + sep

    def get_special_tokens_mask(
        self,
        toks_0,
        toks_1=None,
        has_specials=False,
    ):
        """Return a mask with 1 at special-token (SEP) positions, else 0."""
        if has_specials:
            return super().get_special_tokens_mask(toks_0=toks_0, toks_1=toks_1, has_specials=True)
        if toks_1 is not None:
            return ([0] * len(toks_0)) + [1] + ([0] * len(toks_1)) + [1]
        return ([0] * len(toks_0)) + [1]

    def create_token_type_ids_from_sequences(self, toks_0, toks_1=None):
        """Segment ids: 0 for the first sequence (+SEP), 1 for the second (+SEP)."""
        sep = [self.SEP]
        if toks_1 is None:
            return len(toks_0 + sep) * [0]
        return len(toks_0 + sep) * [0] + len(toks_1 + sep) * [1]

    def save_vocabulary(self, dir, pre=None):
        """Write src/tgt vocab json files and the BPE merges file to *dir*.

        Returns the three file paths. *pre* is an optional filename prefix.
        """
        src_vocab_file = os.path.join(
            dir,
            (pre + "-" if pre else "") + VOCAB_FS["src_vocab_file"],
        )
        tgt_vocab_file = os.path.join(
            dir,
            (pre + "-" if pre else "") + VOCAB_FS["tgt_vocab_file"],
        )
        merges_file = os.path.join(
            dir,
            (pre + "-" if pre else "") + VOCAB_FS["merges_file"],
        )
        with open(src_vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))
        with open(tgt_vocab_file, "w", encoding="utf-8") as f:
            # decoder is id -> token; persist it back as token -> id.
            tgt_vocab = {v: k for k, v in self.decoder.items()}
            f.write(json.dumps(tgt_vocab, ensure_ascii=False))
        index = 0
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    # BUGFIX: `logger` was referenced here but never defined
                    # anywhere in this module (NameError on this path).
                    logging.getLogger(__name__).warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return src_vocab_file, tgt_vocab_file, merges_file
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,467
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/run/xlate.py
|
# Copyright 2021 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# fine-tune on text translation
import logging
import numpy as np
import random
import torch
from datasets import load_metric
from torch.utils.data import DataLoader
from transformers import (
AutoModelForSeq2SeqLM,
DataCollatorForSeq2Seq,
MBartTokenizer,
MBartTokenizerFast,
default_data_collator,
)
from .params import TRAIN, EVAL, ALL
from .runner import Runner as Base
log = logging.getLogger(__name__)
def postproc(xs, ls):
    """Strip whitespace from predictions; wrap each stripped reference in a list.

    sacrebleu expects references as a list-of-lists, hence the extra nesting
    on the second return value.
    """
    preds = list(map(str.strip, xs))
    refs = [[r.strip()] for r in ls]
    return preds, refs
class Runner(Base):
    """Fine-tunes a seq2seq model on text translation and evaluates with BLEU.

    Lazily builds tokenizer/model/datasets/loaders via cached properties, in
    the style of the base Runner.
    """

    AutoModel = AutoModelForSeq2SeqLM

    @property
    def tokenizer(self):
        # Lazily build the tokenizer. For mBART, propagate the language codes
        # from params; also cache the bare (pre-underscore) language names and
        # the optional source prefix for prep_for_train.
        if self._tokenizer is None:
            ps = self.params
            t = super().tokenizer
            if isinstance(t, (MBartTokenizer, MBartTokenizerFast)):
                if ps.source_lang is not None:
                    t.src_lang = ps.source_lang
                if ps.target_lang is not None:
                    t.tgt_lang = ps.target_lang
            self.source_lang = ps.source_lang.split("_")[0]
            self.target_lang = ps.target_lang.split("_")[0]
            self.prefix = ps.source_prefix if ps.source_prefix is not None else ""
        return self._tokenizer

    @property
    def model(self):
        # Lazily build the model; mBART checkpoints need `config.dec_START`
        # (decoder start token) derived from the target language.
        if self._model is None:
            ps = self.params
            t, m = self.tokenizer, super().model
            if m.config.dec_START is None and isinstance(t, (MBartTokenizer, MBartTokenizerFast)):
                assert (
                    ps.target_lang is not None and ps.source_lang is not None
                ), "mBart needs --target_lang and --source_lang"
                if isinstance(t, MBartTokenizer):
                    m.config.dec_START = t.lang_code_to_id[ps.target_lang]
                else:
                    m.config.dec_START = t.convert_tokens_to_ids(ps.target_lang)
            if m.config.dec_START is None:
                raise ValueError("Needs `config.dec_START`")
        # BUG FIX: the property previously fell through without a return, so
        # `self.model` always evaluated to None (breaking main()'s
        # resize_token_embeddings call). Assumes `super().model` caches into
        # `self._model`, mirroring the tokenizer property — TODO confirm in Base.
        return self._model

    @property
    def train_ds(self):
        # Tokenize the raw dataset once (main process first so the cache is
        # shared), then slice out the training split.
        if self._train_ds is None:
            ps, mgr, ds = self.params, self.mgr, self.dataset
            with mgr.main_process_first():
                self._dataset = y = ds.map(
                    self.prep_for_train,
                    batched=True,
                    remove_columns=self.cols[ALL],
                    load_from_cache_file=not ps.overwrite_cache,
                    desc="Running tokenizer on dataset",
                )
            y = y[TRAIN]
            if ps.max_train_samples is not None:
                y = y.select(range(ps.max_train_samples))
            # min(3, len(y)) guards against datasets with fewer than 3 rows,
            # where random.sample would raise ValueError.
            for i in random.sample(range(len(y)), min(3, len(y))):
                log.info(f"Sample {i} of the training set: {y[i]}")
            self._train_ds = y
        return self._train_ds

    def prep_for_train(self, xs):
        """Tokenize source/target pairs; mask label pad tokens with -100.

        -100 is the conventional ignore_index for the cross-entropy loss, so
        padding positions do not contribute to training loss.
        """
        ps, t = self.params, self.tokenizer
        ins = [x[self.source_lang] for x in xs["translation"]]
        targets = [x[self.target_lang] for x in xs["translation"]]
        ins = [self.prefix + x for x in ins]
        ys = t(ins, max_len=ps.max_source_length, padding=ps.padding, truncation=True)
        with t.as_target_tokenizer():
            ls = t(targets, max_len=ps.max_target_length, padding=ps.padding, truncation=True)
        # BUG FIX: was `self.padding`, which is never assigned in this class;
        # every other use in this method reads `ps.padding`.
        if ps.padding == "max_len" and ps.ignore_pad_token_for_loss:
            ls["input_ids"] = [[(y if y != t.PAD else -100) for y in x] for x in ls["input_ids"]]
        ys["labels"] = ls["input_ids"]
        return ys

    @property
    def loaders(self):
        # With static padding the default collator suffices; otherwise pad
        # dynamically per batch (to a multiple of 8 under fp16 for tensor cores).
        if self._loaders is None:
            ps, t = self.params, self.tokenizer
            if ps.pad_to_max_length:
                c = default_data_collator
            else:
                c = DataCollatorForSeq2Seq(
                    t,
                    model=self.model,
                    label_pad_token_id=-100 if ps.ignore_pad_token_for_loss else t.PAD,
                    pad_to_multiple_of=8 if self.mgr.use_fp16 else None,
                )
            t = DataLoader(
                self.train_ds, shuffle=True, collate_fn=c, batch_size=ps.train_batch_size
            )
            e = DataLoader(self.eval_ds, collate_fn=c, batch_size=ps.eval_batch_size)
            self._loaders = {TRAIN: t, EVAL: e}
        return self._loaders

    @property
    def metric(self):
        # sacrebleu: standard corpus-level BLEU for translation.
        if self._metric is None:
            self._metric = load_metric("sacrebleu")
        return self._metric

    def eval_epoch(self, e):
        """Generate translations for the eval split and log corpus BLEU for epoch `e`."""
        ps, t, m, mgr = self.params, self.tokenizer, self.model, self.mgr
        m.eval()
        if ps.val_max_target_length is None:
            ps.val_max_target_length = ps.max_target_length
        kw = {
            # BUG FIX: was `ps is not None` (always true); the intent is to
            # fall back to the config when the param itself is unset.
            "max_len": ps.val_max_target_length if ps.val_max_target_length is not None else self.config.max_len,
            "n_beams": ps.n_beams,
        }
        for xs in self.loaders[EVAL]:
            with torch.no_grad():
                ys = mgr.unwrap_model(m).generate(xs["input_ids"], mask=xs["mask"], **kw)
            # Pad generations/labels across processes so gather() can stack them.
            ys = mgr.pad_across_processes(ys, dim=1, PAD=t.PAD)
            ls = xs["labels"]
            if not ps.pad_to_max_length:
                ls = mgr.pad_across_processes(xs["labels"], dim=1, PAD=t.PAD)
            ys = mgr.gather(ys).cpu().numpy()
            ls = mgr.gather(ls).cpu().numpy()
            if ps.ignore_pad_token_for_loss:
                # Restore pad ids so batch_decode does not choke on -100.
                ls = np.where(ls != -100, ls, t.PAD)
            ys = t.batch_decode(ys, skip_special_tokens=True)
            ls = t.batch_decode(ls, skip_special_tokens=True)
            ys, ls = postproc(ys, ls)
            self.metric.add_batch(predictions=ys, references=ls)
        y = self.metric.compute()["score"]
        mgr.print(f"epoch {e}: bleu: {y}")
def main():
    """Wire up the runner and execute fine-tuning end to end.

    Property accesses are for their lazy-initialization side effects.
    """
    runner = Runner()
    runner.dataset
    runner.config
    runner.tokenizer
    runner.model
    runner.model.resize_token_embeddings(len(runner.tokenizer))
    runner.loaders
    runner.prepare()
    runner.train()
    runner.save()
# Script entry point: run the full fine-tuning pipeline when invoked directly.
if __name__ == "__main__":
    main()
"""
python xlate.py \
--model_name Helsinki-NLP/opus-mt-en-ro \
--source_lang en \
--target_lang ro \
--dataset_name wmt16 \
--dataset_config ro-en \
--out_dir ~/tmp/tst-translation
accelerate launch xlate.py \
--model_name Helsinki-NLP/opus-mt-en-ro \
--source_lang en \
--target_lang ro \
--dataset_name wmt16 \
--dataset_config ro-en \
--out_dir ~/tmp/tst-translation
"""
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,468
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/tokens/deberta.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from ...tokens.utils import AddedToken
from .gpt2 import Tokenizer as GPT2
# File names under which the vocabulary is saved/loaded on disk.
VOCAB_FS = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# Download URLs for the pretrained vocab/merges files, keyed by checkpoint name.
VOCAB_MAP = {
    "vocab_file": {
        "microsoft/deberta-base": "https://huggingface.co/microsoft/deberta-base/resolve/main/vocab.json",
        "microsoft/deberta-large": "https://huggingface.co/microsoft/deberta-large/resolve/main/vocab.json",
        "microsoft/deberta-xlarge": "https://huggingface.co/microsoft/deberta-xlarge/resolve/main/vocab.json",
        "microsoft/deberta-base-mnli": "https://huggingface.co/microsoft/deberta-base-mnli/resolve/main/vocab.json",
        "microsoft/deberta-large-mnli": "https://huggingface.co/microsoft/deberta-large-mnli/resolve/main/vocab.json",
        "microsoft/deberta-xlarge-mnli": "https://huggingface.co/microsoft/deberta-xlarge-mnli/resolve/main/vocab.json",
    },
    "merges_file": {
        "microsoft/deberta-base": "https://huggingface.co/microsoft/deberta-base/resolve/main/merges.txt",
        "microsoft/deberta-large": "https://huggingface.co/microsoft/deberta-large/resolve/main/merges.txt",
        "microsoft/deberta-xlarge": "https://huggingface.co/microsoft/deberta-xlarge/resolve/main/merges.txt",
        "microsoft/deberta-base-mnli": "https://huggingface.co/microsoft/deberta-base-mnli/resolve/main/merges.txt",
        "microsoft/deberta-large-mnli": "https://huggingface.co/microsoft/deberta-large-mnli/resolve/main/merges.txt",
        "microsoft/deberta-xlarge-mnli": "https://huggingface.co/microsoft/deberta-xlarge-mnli/resolve/main/merges.txt",
    },
}
# Maximum input sequence length (in tokens) for each pretrained checkpoint.
INPUT_CAPS = {
    "microsoft/deberta-base": 512,
    "microsoft/deberta-large": 512,
    "microsoft/deberta-xlarge": 512,
    "microsoft/deberta-base-mnli": 512,
    "microsoft/deberta-large-mnli": 512,
    "microsoft/deberta-xlarge-mnli": 512,
}
# Per-checkpoint tokenizer constructor overrides.
PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/deberta-base": {"do_lower_case": False},
    "microsoft/deberta-large": {"do_lower_case": False},
}
class Tokenizer(GPT2):
    """DeBERTa tokenizer: GPT-2 byte-level BPE with BERT-style special tokens.

    Inherits the byte-pair-encoding machinery from the GPT-2 tokenizer and
    overrides the special-token layout ([CLS] ... [SEP] ... [SEP]) and the
    all-zero token-type ids used by DeBERTa.
    """

    vocab_fs = VOCAB_FS
    vocab_map = VOCAB_MAP
    input_caps = INPUT_CAPS
    model_input_names = ["input_ids", "attention_mask", "token_type_ids"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos="[CLS]",
        eos="[SEP]",
        sep="[SEP]",
        cls="[CLS]",
        unk="[UNK]",
        pad="[PAD]",
        msk="[MASK]",
        add_prefix_space=False,
        **kw,
    ):
        def as_token(t, lstrip=False):
            # Wrap plain strings in AddedToken; pass AddedToken instances through.
            return AddedToken(t, lstrip=lstrip, rstrip=False) if isinstance(t, str) else t

        super().__init__(
            vocab_file=vocab_file,
            merges_file=merges_file,
            errors=errors,
            bos=as_token(bos),
            eos=as_token(eos),
            unk=as_token(unk),
            sep=as_token(sep),
            cls=as_token(cls),
            pad=as_token(pad),
            # [MASK] strips the space to its left so "x [MASK]" round-trips.
            msk=as_token(msk, lstrip=True),
            add_prefix_space=add_prefix_space,
            **kw,
        )

    def build_inputs_with_special_tokens(self, toks_0, toks_1=None):
        """Return `[CLS] A [SEP]` for a single sequence, `[CLS] A [SEP] B [SEP]` for a pair."""
        ids = [self.cls_token_id] + toks_0 + [self.sep_token_id]
        if toks_1 is not None:
            ids = ids + toks_1 + [self.sep_token_id]
        return ids

    def get_special_tokens_mask(self, toks_0, toks_1=None, has_specials=False):
        """Return a mask with 1 at special-token positions and 0 at sequence tokens."""
        if has_specials:
            # Input already carries special tokens; let the base class locate them.
            return super().get_special_tokens_mask(toks_0=toks_0, toks_1=toks_1, has_specials=True)
        out = [1] + [0] * len(toks_0) + [1]
        if toks_1 is not None:
            out = out + [0] * len(toks_1) + [1]
        return out

    def create_token_type_ids_from_sequences(self, toks_0, toks_1=None):
        """Return token-type ids; DeBERTa uses segment id 0 for every position."""
        n = len(toks_0) + 2  # [CLS] ... [SEP]
        if toks_1 is not None:
            n += len(toks_1) + 1  # ... [SEP]
        return [0] * n

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kw):
        """Optionally prepend a space so the byte-level BPE treats the first word like any other."""
        prefix = kw.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or prefix) and text and not text[0].isspace():
            text = " " + text
        return (text, kw)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,469
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/triton/ops/blocksparse/softmax.py
|
import torch
import triton
import triton.language as tl
def num_warps(n):
    """Pick the warp count for a softmax row of `n` elements.

    Returns the smallest warp count from a fixed ladder (1, 2, 4, 8) whose
    row-size threshold covers `n`, falling back to 16 for very wide rows.
    """
    for limit, warps in ((128, 1), (256, 2), (512, 4), (4096, 8)):
        if n <= limit:
            return warps
    return 16
# Forward kernel: each program instance computes the softmax of ONE row of one
# (head, row) slice of the block-sparse matrix A and writes it to Out.
# Launch grid (see _softmax.forward): presumably (heads, rows, batch) — axis 0
# is the head, axis 1 the row, axis 2 the batch index; TODO confirm at callers.
@triton.jit
def _blocksparse_softmax_fwd(
    Out, A, stride_xz, LUT,
    R, extent, stride_zr, stride_hr,  # relative attention (R may be None)
    scale, is_causal,
    ROW_SIZE: tl.constexpr,  # padded row width; power of two >= max non-zero row length
    BLOCK_SIZE: tl.constexpr,  # side of one dense sub-block
    IS_DENSE: tl.constexpr,  # skip the LUT column lookup when the layout is dense
):
    h = tl.program_id(0)
    m = tl.program_id(1)
    z = tl.program_id(2)
    # create index ranges: lane_n is the position inside a block, block_n the
    # block index along this row
    hm = h * tl.num_programs(1) + m
    lane_n = tl.arange(0, ROW_SIZE) % BLOCK_SIZE
    block_n = tl.arange(0, ROW_SIZE) // BLOCK_SIZE
    # extract information from LUT: the header holds (size, offset) pairs, one
    # per block-row; `size` = number of non-zero blocks, `offset` = where this
    # block-row's data starts (in blocks)
    header = LUT + (hm // BLOCK_SIZE) * 2
    size = tl.load(header + 0)
    offset = tl.load(header + 1)
    # pointer offset into A: batch stride, then whole blocks (each block is a
    # contiguous BLOCK_SIZE*BLOCK_SIZE chunk), then the row inside the block
    off_a = z * stride_xz
    off_a += (offset + block_n) * BLOCK_SIZE * BLOCK_SIZE  # block indx
    off_a += (m % BLOCK_SIZE) * BLOCK_SIZE  # row indx
    # do not need to read column indices in the dense case
    if IS_DENSE:
        ns = tl.arange(0, ROW_SIZE)
    else:
        # column indices live in the LUT right after all the headers
        off_lut = offset + 2 * tl.num_programs(0) * tl.num_programs(1) // BLOCK_SIZE
        start_n = tl.load(LUT + off_lut + block_n, mask=block_n < size, other=0)
        ns = start_n * BLOCK_SIZE + lane_n
    # load X; padding lanes read -inf so they contribute nothing to the softmax
    mask = block_n < size
    a = tl.load(A + off_a + lane_n, mask=mask, other=-float("inf"))
    a = a.to(tl.float32)
    # compute scaled logits
    out = a
    out *= scale
    # apply relative attention: gather per-(z, h) relative logits; off_lo maps
    # column index to a relative position in [0, extent)
    if R is not None:
        R += z * stride_zr
        R += h * stride_hr
        off_lo = (extent - m - 1) + ns
        mask_lo = (off_lo >= 0) & (off_lo < extent)
        rel_logits = tl.load(R + m * extent + off_lo, mask=mask_lo, other=0.0)
        out += rel_logits
    out = out.to(tl.float32)
    # apply causal mask: columns past the current row get -inf
    out = tl.where((ns > m) & is_causal, -float("inf"), out)
    # softmax over the (padded) row
    out = tl.softmax(out)
    # write-back in the same blocked layout as the input
    tl.store(Out + off_a + lane_n, out, mask=mask)
# Backward kernel: each program instance computes the softmax gradient of one
# row — da = a * (dout - sum(a * dout)) — mirroring the indexing of the
# forward kernel above.
@triton.jit
def _blocksparse_softmax_bwd(
    DA, stride_zdx,
    DOut, stride_zdout,
    Out, stride_zout,
    scale,
    LUT,
    # NOTE(review): stride_er is accepted but never used below — confirm
    # whether it was meant for the DR addressing.
    DR, extent, stride_zr, stride_hr, stride_er,
    is_causal,
    ROW_SIZE: tl.constexpr,  # padded row width; power of two >= max non-zero row length
    BLOCK_SIZE: tl.constexpr,  # side of one dense sub-block
    IS_DENSE: tl.constexpr,  # skip the LUT column lookup when the layout is dense
):
    h = tl.program_id(0)
    m = tl.program_id(1)
    z = tl.program_id(2)
    # create index ranges (same decomposition as the forward kernel)
    hm = h * tl.num_programs(1) + m
    lane_n = tl.arange(0, ROW_SIZE) % BLOCK_SIZE
    block_n = tl.arange(0, ROW_SIZE) // BLOCK_SIZE
    # extract information from LUT: (size, offset) header per block-row
    header = LUT + (hm // BLOCK_SIZE) * 2
    size = tl.load(header + 0)
    offset = tl.load(header + 1)
    # row-col offset into the blocked layout
    off_mn = (offset + block_n) * BLOCK_SIZE * BLOCK_SIZE
    off_mn += (m % BLOCK_SIZE) * BLOCK_SIZE
    mask = block_n < size
    # pointers to the saved softmax output and the incoming gradient
    As = Out + z * stride_zout + off_mn
    DOuts = DOut + z * stride_zdout + off_mn
    # do not need to read column indices in the dense case
    if IS_DENSE:
        ns = tl.arange(0, ROW_SIZE)
    else:
        off_lut = offset + 2 * tl.num_programs(0) * tl.num_programs(1) // BLOCK_SIZE
        start_n = tl.load(LUT + off_lut + block_n, mask=mask, other=0)
        ns = start_n * BLOCK_SIZE + lane_n
    # load data
    a = tl.load(As + lane_n, mask=mask, other=0.0)
    a = a.to(tl.float32)
    dout = tl.load(DOuts + lane_n, mask=mask, other=0.0)
    dout = dout.to(tl.float32)
    # compute: zero out causally-masked lanes first ((a == a) additionally
    # filters NaN), then apply the softmax Jacobian-vector product
    a = tl.where((ns > m) & is_causal & (a == a), 0., a)
    da = a * (dout - tl.sum(a * dout, 0))
    # apply relative attention: scatter this row's gradient into DR BEFORE the
    # scale is applied — rel_logits were added after scaling in the forward,
    # so their gradient is unscaled
    if DR is not None:
        DR += z * stride_zr
        DR += h * stride_hr
        off_lo = (extent - m - 1) + ns
        mask_lo = (off_lo >= 0) & (off_lo < extent) & mask
        tl.store(DR + m * extent + off_lo, da, mask=mask_lo)
    # the input logits were multiplied by `scale`, so their gradient is too
    da = da * scale
    # convert da
    # write-back
    DAs = DA + z * stride_zdx + off_mn
    tl.store(DAs + lane_n, da, mask=mask)
class _softmax(torch.autograd.Function):
    """Autograd wrapper launching the block-sparse softmax Triton kernels."""

    @staticmethod
    def make_lut(layout, block, device):
        """Build the look-up table consumed by both kernels.

        The LUT is laid out as interleaved (size, offset) headers — one pair
        per (head, block-row) — followed by the column index of every non-zero
        block. Returns (lut, max_row_width_in_elements).
        """
        _empty = torch.tensor([], dtype=torch.int64, device=layout.device)
        sizes = _empty.clone()
        # sizes along rows: non-zero block count per block-row, head by head
        for h in range(layout.shape[0]):
            sizes = torch.cat((sizes, layout[h, :, :].sum(-1)))
        total_sizes = sizes * block
        # offsets in block format: exclusive prefix sum of the sizes
        offsets = torch.zeros_like(sizes)
        offsets[1:] = torch.cumsum(sizes[:-1], dim=0)
        # block indices: column coordinate of each non-zero block
        columns = layout.nonzero(as_tuple=False)[:, 2]
        header = torch.stack((sizes, offsets), dim=1).view(-1)
        lut = torch.cat((header, columns)).type(torch.int32).to(device)
        return lut, int(total_sizes.max())

    @staticmethod
    def forward(
        ctx, a, scale, rel_logits, is_causal,
        spdims, block, lut, maxlut, is_dense
    ):
        """Run the forward kernel and stash everything backward() needs on ctx."""
        if scale is not None and isinstance(scale, torch.Tensor):
            # kernels take a plain float; require a CPU scalar tensor
            assert scale.device.type == "cpu"
            scale = scale.item()
        M = a.shape[0]
        # one program per (head, row, batch) — presumably spdims is
        # (heads, block_rows, block_cols); TODO confirm against callers
        grid = [spdims[0], spdims[1] * block, M]
        rel_shape = (1, 1, 1, 1) if rel_logits is None else rel_logits.shape
        rel_strides = (1, 1, 1, 1) if rel_logits is None else rel_logits.stride()
        # enqueue kernel
        out = torch.empty_like(a)
        _blocksparse_softmax_fwd[grid](
            out, a, a.stride(0), lut,
            rel_logits, rel_shape[-1], rel_strides[0], rel_strides[1],  # relative attn
            scale,
            is_causal,
            BLOCK_SIZE=block,
            ROW_SIZE=triton.next_power_of_2(maxlut),
            IS_DENSE=is_dense,
            num_warps=num_warps(maxlut)
        )
        # save to context
        # ctx.mark_dirty(x)
        ctx.save_for_backward(out, lut)
        ctx.spdims = spdims
        ctx.block = block
        ctx.maxlut = maxlut
        ctx.scale = scale
        ctx.rel_shape = rel_shape
        ctx.rel_strides = rel_strides
        ctx.rel_dtype = a.dtype
        ctx.is_dense = is_dense
        ctx.is_causal = is_causal
        return out

    @staticmethod
    def backward(ctx, dout):
        """Run the backward kernel; gradients only for `a` and `rel_logits`."""
        # retrieve from context
        out, lut = ctx.saved_tensors
        # relative logits gradients
        dr = None
        # NOTE(review): forward's input 3 is `is_causal`; `rel_logits` is
        # input 2 — this gate (and dr's slot in the return tuple below) looks
        # off by one; confirm against torch.autograd.Function grad ordering.
        if ctx.needs_input_grad[3]:
            dr = torch.zeros(ctx.rel_shape, dtype=ctx.rel_dtype, device=out.device)
        # run kernel
        M = out.shape[0]
        grid = (ctx.spdims[0], ctx.spdims[1] * ctx.block, M)
        da = torch.empty_like(dout)
        _blocksparse_softmax_bwd[grid](
            da, da.stride(0),
            dout, dout.stride(0),
            out, out.stride(0),
            ctx.scale,
            lut,
            dr, ctx.rel_shape[-1], ctx.rel_strides[0], ctx.rel_strides[1], ctx.rel_strides[2],
            ctx.is_causal,
            BLOCK_SIZE=ctx.block,
            ROW_SIZE=triton.next_power_of_2(ctx.maxlut),
            IS_DENSE=ctx.is_dense,
            num_warps=num_warps(ctx.maxlut)
        )
        # NOTE(review): this tuple has 18 entries while forward() takes 9
        # inputs — the trailing Nones appear to be residue of an older
        # signature; verify against the current forward() argument list.
        return (da, None, None, dr, None,
                None, None, None, None, None,
                None,
                None, None, None,
                None,
                None, None, None
                )
class softmax:
    """Callable block-sparse softmax.

    Precomputes the kernel look-up table once from the sparsity `layout`
    (heads x block-rows x block-cols) and `block` size, then applies the
    `_softmax` autograd function on every call.
    """

    def __init__(self, layout, block, device, is_dense=False):
        self.layout = layout
        self.block = block
        self.spdims = layout.shape
        self.is_dense = is_dense
        # LUT and maximum row width are layout-dependent only; build them once.
        self.lut, self.maxlut = _softmax.make_lut(self.layout, self.block, device)

    def __call__(self, a, *, scale=1.0, rel_logits=None, is_causal=False):
        if rel_logits is not None:
            # The kernel assumes matching dtypes for logits and rel. embeddings.
            if rel_logits.dtype != a.dtype:
                raise ValueError(f"relative position embedding must be {a.dtype}")
        return _softmax.apply(
            a, scale, rel_logits, is_causal,
            self.spdims, self.block, self.lut, self.maxlut, self.is_dense,
        )
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,470
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/dataset/xnli.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import csv
import datasets as ds
from os.path import join
# Base URL of the public XNLI release archives.
_URL = "https://dl.fbaipublicfiles.com/XNLI/"
# Train data comes from the machine-translated (MT) archive;
# validation/test come from the XNLI-1.0 archive.
_URLS = {
    "train": _URL + "XNLI-MT-1.0.zip",
    "valid": _URL + "XNLI-1.0.zip",
}
# Languages exposed as builder configs.
_LANGS = ("de", "en")
class Xnli(ds.GeneratorBasedBuilder):
    """XNLI natural language inference builder — one config per language in _LANGS."""
    BUILDER_CONFIGS = [ds.BuilderConfig(name=x, version=ds.Version("1.1.0")) for x in _LANGS]
    def _info(self):
        # Three-way NLI classification over premise/hypothesis pairs.
        return ds.DatasetInfo(
            description="",
            citation="",
            homepage="",
            license="",
            features=ds.Features(
                {
                    "premise": ds.Value("string"),
                    "hypothesis": ds.Value("string"),
                    "label": ds.ClassLabel(names=["entailment", "neutral", "contradiction"]),
                }
            ),
        )
    def _split_generators(self, mgr):
        fs = mgr.download_and_extract(_URLS)
        t = join(fs["train"], "XNLI-MT-1.0", "multinli")
        v = join(fs["valid"], "XNLI-1.0")
        # FIX: SplitGenerator takes `gen_kwargs` (was `gen_kw`), and its keys
        # must match _generate_examples' parameter names. The train path is
        # wrapped in a list so all splits iterate files uniformly (the old
        # code passed a bare string, which the MT branch would have iterated
        # character by character).
        return [
            ds.SplitGenerator(
                name=ds.Split.TRAIN,
                gen_kwargs={
                    "filepaths": [join(t, f"multinli.train.{self.config.name}.tsv")],
                    "data_format": "XNLI-MT",
                },
            ),
            ds.SplitGenerator(
                name=ds.Split.TEST,
                gen_kwargs={"filepaths": [join(v, "xnli.test.tsv")], "data_format": "XNLI"},
            ),
            ds.SplitGenerator(
                name=ds.Split.VALIDATION,
                gen_kwargs={"filepaths": [join(v, "xnli.dev.tsv")], "data_format": "XNLI"},
            ),
        ]
    def _generate_examples(self, filepaths, data_format):
        """Yield (key, example) pairs from TSV files.

        Parameter names must match the gen_kwargs keys above, since the
        framework invokes this as ``_generate_examples(**gen_kwargs)``.
        """
        if data_format == "XNLI-MT":
            # Per-language machine-translated train files: columns are
            # premise/hypo/label, with "contradictory" used for contradiction.
            for i, path in enumerate(filepaths):
                # `with` ensures the file is closed even if a row raises
                # (the old code leaked the handle).
                with open(path, encoding="utf-8") as f:
                    r = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
                    for j, x in enumerate(r):
                        k = str(i) + "_" + str(j)
                        yield k, {
                            "premise": x["premise"],
                            "hypothesis": x["hypo"],
                            "label": x["label"].replace("contradictory", "contradiction"),
                        }
        else:
            # Dev/test files contain all languages; keep only this config's language.
            for path in filepaths:
                with open(path, encoding="utf-8") as f:
                    r = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
                    for x in r:
                        if x["language"] == self.config.name:
                            yield x["pairID"], {
                                "premise": x["sentence1"],
                                "hypothesis": x["sentence2"],
                                "label": x["gold_label"],
                            }
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,471
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/tokens/fast/splinter.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import json
from tokenizers import normalizers
from ....tokens.fast import PreTrainedTokenizerFast
from ..splinter import Tokenizer as Splinter
# Filename used when saving the vocabulary locally.
VOCAB_FS = {"vocab_file": "vocab.txt"}
# Pretrained checkpoint name -> hosted vocabulary URL.
VOCAB_MAP = {
    "vocab_file": {
        "tau/splinter-base": "https://huggingface.co/tau/splinter-base/resolve/main/vocab.txt",
        "tau/splinter-base-qass": "https://huggingface.co/tau/splinter-base-qass/resolve/main/vocab.txt",
        "tau/splinter-large": "https://huggingface.co/tau/splinter-large/resolve/main/vocab.txt",
        "tau/splinter-large-qass": "https://huggingface.co/tau/splinter-large-qass/resolve/main/vocab.txt",
    }
}
# Maximum input length (in tokens) for each pretrained checkpoint.
INPUT_CAPS = {
    "tau/splinter-base": 512,
    "tau/splinter-base-qass": 512,
    "tau/splinter-large": 512,
    "tau/splinter-large-qass": 512,
}
# Per-checkpoint init overrides; all Splinter checkpoints are cased.
PRETRAINED_INIT_CONFIGURATION = {
    "tau/splinter-base": {"do_lower_case": False},
    "tau/splinter-base-qass": {"do_lower_case": False},
    "tau/splinter-large": {"do_lower_case": False},
    "tau/splinter-large-qass": {"do_lower_case": False},
}
class Tokenizer(PreTrainedTokenizerFast):
    """Fast (Rust-backed) tokenizer for Splinter.

    A BERT-style WordPiece tokenizer extended with a special
    ``[QUESTION]`` token used by Splinter's question-aware pretraining.
    """

    vocab_fs = VOCAB_FS
    vocab_map = VOCAB_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    input_caps = INPUT_CAPS
    slow_tokenizer_class = Splinter

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk="[UNK]",
        sep="[SEP]",
        pad="[PAD]",
        cls="[CLS]",
        msk="[MASK]",
        question_token="[QUESTION]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kw,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk=unk,
            sep=sep,
            pad=pad,
            cls=cls,
            msk=msk,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            additional_special_tokens=(question_token,),
            **kw,
        )
        # question_token_id reads this attribute; it was never stored before,
        # so accessing the property raised AttributeError.
        self.question_token = question_token
        # A tokenizer_file serialized with different lowercase/strip_accents
        # settings must have its backend normalizer rebuilt to match the
        # arguments passed here.
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case

    @property
    def question_token_id(self):
        # Vocabulary id of the [QUESTION] special token.
        return self.convert_tokens_to_ids(self.question_token)

    def build_inputs_with_special_tokens(self, toks_0, toks_1=None):
        """Build model inputs with special tokens.

        Single sequence: ``[CLS] toks_0 [SEP]``. For pairs, the question
        suffix ``[QUESTION] .`` is attached to the question side, which
        depends on the padding side.
        """
        if toks_1 is None:
            return [self.cls_token_id] + toks_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        suff = [self.question_token_id] + [self.convert_tokens_to_ids(".")]
        if self.padding_side == "right":
            # question first: [CLS] question [QUESTION] . [SEP] context [SEP]
            return cls + toks_0 + suff + sep + toks_1 + sep
        else:
            # context first: [CLS] context [SEP] question [QUESTION] . [SEP]
            return cls + toks_0 + sep + toks_1 + suff + sep

    def create_token_type_ids_from_sequences(self, toks_0, toks_1=None):
        """Return segment ids (0 for the first sequence, 1 for the second),
        mirroring the layout produced by build_inputs_with_special_tokens."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        suff = [self.question_token_id] + [self.convert_tokens_to_ids(".")]
        if toks_1 is None:
            return len(cls + toks_0 + sep) * [0]
        if self.padding_side == "right":
            return len(cls + toks_0 + suff + sep) * [0] + len(toks_1 + sep) * [1]
        else:
            return len(cls + toks_0 + sep) * [0] + len(toks_1 + suff + sep) * [1]

    def save_vocabulary(self, dir, pre=None):
        """Save the WordPiece vocabulary files into *dir* (optionally
        prefixed) and return the written paths."""
        return tuple(self._tokenizer.model.save(dir, name=pre))
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,472
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/models/old/trafo.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import torch
from qnarre.core.attention import Attend
from qnarre.core.base import Hypers, Module, Linear
from qnarre.core.mlp import MLP
from qnarre.core.deduce import Deduce, Search
from qnarre.core.norm import PreProc, PostProc
from qnarre.core.embed import TokEmbed, TypEmbed, PosEmbed, PosTiming
def adapter(ps, feats, x):
    # Parse one serialized example into an (image, label) pair.
    # NOTE(review): `torch.parse_example` and `torch.to_dense` do not exist in
    # PyTorch -- this looks like a mechanical port of TensorFlow's
    # `tf.io.parse_example` / `tf.sparse.to_dense`; confirm before use.
    d = torch.parse_example(x, feats)
    img = torch.to_dense(d["flt_img"])
    # img = torch.cast(d['int_img'], torch.float32) / 255.
    lbl = d["int_lbl"]
    return img, lbl
def model(ps):
    # Build a Trafo model wired to (src, typ, hint, tgt) integer id inputs
    # sized from the hyper-parameters `len_src` / `len_tgt`.
    # NOTE(review): `torch.Input` / `torch.Model` are Keras-style APIs that do
    # not exist in PyTorch; this file appears to be an unfinished TF->torch
    # port and will raise AttributeError as written.
    src = torch.Input(shape=(ps.len_src,), dtype="int32")
    typ = torch.Input(shape=(ps.len_src,), dtype="int32")
    hint = torch.Input(shape=(ps.len_tgt,), dtype="int32")
    tgt = torch.Input(shape=(ps.len_tgt,), dtype="int32")
    ins = [src, typ, hint, tgt]
    outs = [Trafo(ps)(ins)]
    m = torch.Model(name="TrafoModel", inputs=ins, outputs=outs)
    return m
class Trafo(Module):
    """Transformer wrapper: token/type/position embedding, encoder and decoder
    stacks, and an output projection over the vocabulary."""

    # Hyper-parameter names this module consumes.
    hs = Hypers(
        [
            "beam_size",
            "drop_hidden",
            "len_src",
            "len_tgt",
            "num_toks",
            "pos_type",
            "n_typ",
        ],
        {},
    )
    # Sub-modules created conditionally in __init__ depending on the config.
    typ_embed = pos_embed = enc_stack = dec_stack = pos_x_b = pos_p_b = None

    def __init__(self, dim_out=None, hs=[], **kw):
        # NOTE(review): mutable default `hs=[]` is shared across calls; it is
        # only read here, but `hs=None` with a guard would be safer.
        if dim_out is not None:
            kw.update(dim_out=dim_out)
        super().__init__([self.hs] + hs, **kw)
        cfg = self.cfg
        kw.update(hs=hs)
        self.tok_embed = TokEmbed(**kw)
        if cfg.n_typ:
            self.typ_embed = TypEmbed(**kw)
        # Positional encoding flavor: learned embedding, fixed timing signal,
        # or relative (no module here -- presumably handled inside attention).
        if cfg.pos_type == "embed":
            self.pos_embed = PosEmbed(**kw)
        elif cfg.pos_type == "timing":
            self.pos_embed = PosTiming(**kw)
        else:
            assert cfg.pos_type == "relative"
        self.pre = PreProc(**kw)
        self.post = PostProc(**kw)
        self.enc_stack = EncStack(self, **kw)
        self.dec_stack = DecStack(self, **kw)
        self.deduce = Deduce(self, **kw)
        self.search = Search(self, **kw)
        self.out = Linear(cfg.num_toks, **kw)

    def forward(self, x, training=None):
        # x unpacks as (source ids, type ids, decoder hint ids, target ids).
        src, typ, hint, tgt = x
        ctx = None
        if src is not None:
            y = self.embed(src, typ)
            ctx = self.enc_stack([y])
        if hint is not None:
            y = self.embed(hint)
            ctx = self.dec_stack([y, ctx])
        if training is not None:
            out = self.deduce([ctx, tgt])
        else:
            # NOTE(review): the search path is commented out, so `out` is
            # unbound here and `return out` raises UnboundLocalError whenever
            # `training is None`.
            # out = self.search([tgt, ctx])
            pass
        return out

    def embed(self, x, typ=None):
        # Token embedding, plus optional type and positional embeddings.
        y = self.tok_embed(x)
        if self.typ_embed and typ is not None:
            y = self.typ_embed([y, typ])
        if self.pos_embed:
            y = self.pos_embed(y)
        return y
class Stack(Module):
    """Shared base for encoder/decoder stacks; borrows the owner's pre/post
    processing modules so every stack normalizes the same way."""

    def __init__(self, owner, ps=None, **kw):
        super().__init__(ps, **kw)
        self.post = owner.post
        self.pre = owner.pre
class EncStack(Stack):
    """Stack of `Encoder` layers with shared pre/post processing."""

    hs = Hypers(["n_encoders"], {})

    def __init__(self, owner, ps=None, **kw):
        # Parameter order fixed: the base class is `Stack.__init__(owner, ps)`
        # and the caller (`Trafo`) passes only the owner positionally, so the
        # previous signature `(ps, owner)` made `EncStack(self, **kw)` raise a
        # TypeError (missing `owner`) and swapped owner/ps for each `Encoder`.
        super().__init__(owner, ps, **kw)
        cfg = self.cfg
        n = cfg.n_encoders
        self.encs = [Encoder(owner, ps, name=f"enc_{i}") for i in range(n)]

    def forward(self, x):
        # x is a one-element list holding the embedded source sequence.
        x = x[0]
        y = self.pre([x, x])
        for e in self.encs:
            y = e([y])
        # Post-processing combines the stack input with its output
        # (residual-style, per PreProc/PostProc conventions in this file).
        y = self.post([x, y])
        return y
class DecStack(Stack):
    """Stack of `Decoder` layers attending over the encoder context."""

    hs = Hypers(["num_dec_lays"], {})

    def __init__(self, owner, ps=None, **kw):
        # Parameter order fixed: the base class is `Stack.__init__(owner, ps)`
        # and the caller (`Trafo`) passes only the owner positionally, so the
        # previous signature `(ps, owner)` made `DecStack(self, **kw)` raise a
        # TypeError (missing `owner`) and swapped owner/ps for each `Decoder`.
        super().__init__(owner, ps, **kw)
        cfg = self.cfg
        n = cfg.num_dec_lays
        self.decs = [Decoder(owner, ps, name=f"dec_{i}") for i in range(n)]

    def forward(self, x):
        # x unpacks as (embedded hint sequence, encoder context or None).
        x, ctx = x
        """
        cfg = self.cfg
        if ps.causal_refl:
            if ps.prepend_mode == 'prepend_inputs_full_attention':
                y = torch.cumsum(torch.cumsum(rb, axis=1), axis=1)
                y2 = torch.expand_dims(y, axis=1)
                y = torch.greater(y2, torch.expand_dims(y, axis=2))
                b = torch.expand_dims(torch.cast(y, torch.floatx()) * -1e9, axis=1)
            else:
                ln = torch.int_shape(x)[1]
                sh = (1, 1, ln, ln)
                b = U.ones_band_part(ln, ln, -1, 0, out_shape=sh)
                b = -1e9 * (1.0 - b)
        """
        y = self.pre([x, x])
        for d in self.decs:
            y = d([y, ctx])
        y = self.post([x, y])
        return y
class Encoder(Module):
    """Single encoder layer: self-attention (`refl`) followed by a feed-forward
    net, with an optional rolling memory of past activations."""

    hs = Hypers(
        ["len_mem"],
        {},
    )
    # Rolling memory buffer; stays None unless cfg.len_mem is set.
    mem = None

    def __init__(self, owner, ps=None, name="enc", **kw):
        super().__init__(ps, name=name, **kw)
        self.refl = Attend(owner, ps, name=name + "_refl")
        self.ffnet = MLP(owner, ps, name=name + "_ffnet")
        mlen = self.cfg.len_mem
        if mlen:
            # NOTE(review): `input_shape` is not defined anywhere in this
            # scope -- this branch raises NameError as written (it looks like
            # a leftover from a Keras-style `build(input_shape)` method).
            s = input_shape[0]
            s = s[:1] + (mlen,) + s[2:]
            self.mem = self.add_resource(self.name + "_mem", s)

    def forward(self, x):
        # x is a one-element list holding the input sequence.
        x = x[0]
        y = self.reflect(x)
        y = self.ffnet(y)
        return y

    def reflect(self, x):
        # Self-attention, optionally over [memory, input]; the memory is then
        # shifted so only the most recent `len_mem` steps are retained.
        m = self.mem
        if m is None:
            y = self.refl([x])
        else:
            y = self.refl([x, m])
            i = self.cfg.len_mem
            # NOTE(review): `.assign(...)` is TensorFlow-style -- PyTorch
            # tensors have no `assign`; confirm `add_resource` returns an
            # object that provides it.
            self.mem.assign(torch.concat([m, x], axis=1)[:, -i:])
        return y
class Decoder(Encoder):
    """Decoder layer: an `Encoder` plus cross-attention over the encoder
    context when one is supplied."""

    def __init__(self, owner, ps=None, name="dec", **kw):
        super().__init__(owner, ps, name, **kw)
        self.attn = Attend(owner, ps, name=name + "_attn")

    def forward(self, x):
        # x unpacks as (input sequence, encoder context or None).
        seq, ctx = x
        out = self.reflect(seq)
        # Cross-attend only when an encoder context is available.
        out = out if ctx is None else self.attn([out, ctx])
        return self.ffnet(out)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,473
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/tokens/fast/led.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from ....tokens.utils import PaddingStrategy
from .bart import Tokenizer as BartFast
from ..led import Tokenizer as LED
# Download locations for the LED tokenizer assets (vocabulary, BPE merges,
# and the prebuilt fast-tokenizer file), keyed by pretrained model name.
VOCAB_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}
# Maximum input length (in tokens) per pretrained model.
INPUT_CAPS = {
    "allenai/led-base-16384": 16384,
}
class Tokenizer(BartFast):
    """Fast LED tokenizer: BART's fast tokenizer plus padding support for the
    LED-specific `global_attention_mask` field (padded positions get -1)."""

    vocab_map = VOCAB_MAP
    input_caps = INPUT_CAPS
    slow_tokenizer_class = LED

    def _pad(
        self,
        encoded_inputs,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        # Base class pads the standard fields first.
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if not return_attention_mask or "global_attention_mask" not in encoded_inputs:
            return encoded_inputs
        reference = encoded_inputs[self.model_input_names[0]]
        mask = encoded_inputs["global_attention_mask"]
        gap = len(reference) - len(mask)
        if gap > 0:
            filler = [-1] * gap
            if self.padding_side == "right":
                encoded_inputs["global_attention_mask"] = mask + filler
            else:
                assert self.padding_side == "left"
                encoded_inputs["global_attention_mask"] = filler + mask
        return encoded_inputs
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,474
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/triton/runtime/cache.py
|
import json
import os
import random
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, Optional
def default_cache_dir():
    """Return the default on-disk cache root: ``~/.triton/cache``."""
    # Path arithmetic, then normalize to the plain-string form callers expect.
    return str(Path.home() / ".triton" / "cache")
class CacheManager(ABC):
    """Abstract interface for compiled-artifact cache backends.

    A backend is constructed with a compilation *key* and stores/retrieves
    named files, optionally recorded together as a group.
    """

    def __init__(self, key):
        # Concrete backends decide what, if anything, to do with the key.
        pass

    @abstractmethod
    def get_file(self, filename) -> Optional[str]:
        """Return the cached path of *filename*, or None when absent."""

    @abstractmethod
    def has_file(self, filename) -> bool:
        """Return True when *filename* is present in the cache."""

    @abstractmethod
    def put(self, data, filename, binary=True) -> str:
        """Store *data* under *filename* and return its cache path."""

    @abstractmethod
    def get_group(self, filename: str) -> Optional[Dict[str, str]]:
        """Return a {child filename: path} mapping for a group, or None."""

    @abstractmethod
    def put_group(self, filename: str, group: Dict[str, str]):
        """Record the files in *group* as belonging to *filename*."""
class FileCacheManager(CacheManager):
    """Cache backend storing artifacts as files under a per-key directory.

    The root comes from the TRITON_CACHE_DIR environment variable (falling
    back to ``default_cache_dir()``), with the compilation key appended.
    """

    def __init__(self, key):
        self.key = key
        self.lock_path = None
        # create cache directory if it doesn't exist
        self.cache_dir = os.environ.get('TRITON_CACHE_DIR', default_cache_dir())
        if self.cache_dir:
            self.cache_dir = os.path.join(self.cache_dir, self.key)
            self.lock_path = os.path.join(self.cache_dir, "lock")
            os.makedirs(self.cache_dir, exist_ok=True)

    def _make_path(self, filename) -> str:
        """Absolute path of *filename* inside this manager's cache dir."""
        return os.path.join(self.cache_dir, filename)

    def has_file(self, filename):
        """Return True when *filename* exists in the cache."""
        if not self.cache_dir:
            return False
        return os.path.exists(self._make_path(filename))

    def get_file(self, filename) -> Optional[str]:
        """Return the cached path of *filename*, or None when not cached."""
        if self.has_file(filename):
            return self._make_path(filename)
        else:
            return None

    def get_group(self, filename: str) -> Optional[Dict[str, str]]:
        """Return {child filename: path} for *filename*'s group, or None.

        Returns None when the group manifest is missing or malformed; raises
        if the manifest names a child file that no longer exists on disk.
        """
        # BUG FIX: the manifest name must embed the group's own filename.
        # It previously was the literal "__grp__(unknown)" (an f-string with
        # no placeholder), so every group collided on one manifest file.
        grp_filename = f"__grp__{filename}"
        if not self.has_file(grp_filename):
            return None
        grp_filepath = self._make_path(grp_filename)
        with open(grp_filepath) as f:
            grp_data = json.load(f)
        child_paths = grp_data.get("child_paths", None)
        # Invalid group data.
        if child_paths is None:
            return None
        result = {}
        for c in child_paths:
            p = self._make_path(c)
            if not os.path.exists(p):
                raise Exception(f"Group file {p} does not exist from group {grp_filename} ")
            result[c] = p
        return result

    # Note a group of pushed files as being part of a group
    def put_group(self, filename: str, group: Dict[str, str]):
        """Write a manifest listing the child filenames of *filename*'s group."""
        if not self.cache_dir:
            return
        grp_contents = json.dumps({"child_paths": sorted(list(group.keys()))})
        # BUG FIX: must match the per-filename manifest name used in get_group.
        grp_filename = f"__grp__{filename}"
        return self.put(grp_contents, grp_filename, binary=False)

    def put(self, data, filename, binary=True) -> str:
        """Atomically write *data* to *filename* in the cache; return its path.

        Returns None when caching is disabled (empty cache_dir). The caller's
        ``binary`` flag is deliberately overridden by the payload's real type.
        """
        if not self.cache_dir:
            return
        binary = isinstance(data, bytes)
        if not binary:
            data = str(data)
        assert self.lock_path is not None
        filepath = self._make_path(filename)
        # Random ID to avoid any collisions
        rnd_id = random.randint(0, 1000000)
        # we use the PID incase a bunch of these around so we can see what PID made it
        pid = os.getpid()
        # use tempfile to be robust against program interruptions
        temp_path = f"{filepath}.tmp.pid_{pid}_{rnd_id}"
        mode = "wb" if binary else "w"
        with open(temp_path, mode) as f:
            f.write(data)
        # Replace is guaranteed to be atomic on POSIX systems if it succeeds
        # so filepath cannot see a partial write
        os.replace(temp_path, filepath)
        return filepath
# Active cache backend class, memoized alongside the env value that chose it.
__cache_cls = FileCacheManager
__cache_cls_nme = "DEFAULT"


def get_cache_manager(key) -> CacheManager:
    """Instantiate the active cache manager for *key*.

    The backend defaults to FileCacheManager but can be overridden by setting
    TRITON_CACHE_MANAGER to "module.path:ClassName". The override is resolved
    once and cached in module globals until the env value changes.
    """
    # FIX: dropped the redundant function-local `import os` — os is already
    # imported at module level.
    user_cache_manager = os.environ.get("TRITON_CACHE_MANAGER", None)
    global __cache_cls
    global __cache_cls_nme
    if user_cache_manager is not None and user_cache_manager != __cache_cls_nme:
        import importlib

        module_path, clz_nme = user_cache_manager.split(":")
        module = importlib.import_module(module_path)
        __cache_cls = getattr(module, clz_nme)
        __cache_cls_nme = user_cache_manager
    return __cache_cls(key)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,475
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/models/realm.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import math
import torch
import torch.utils.checkpoint
from dataclasses import dataclass
from torch import nn
from torch.nn import functional as F
from transformers.utils import logging
from .. import core as qc
from ..core import utils as qu
from ..core import output as qo
from ..core import attention as qa
from ..core.embed import Embed
from ..core.mlp import Classifier, MLP, Predictor, Pool
from ..prep.config.bert import PreTrained
from ...pytorch_utils import (
apply_chunking_to_forward,
)
log = logging.get_logger(__name__)
# Identifiers of the published REALM checkpoints (CC-News pretrained
# embedder/encoder/scorer plus the NQ/WQ OpenQA and reader variants).
LIST = [
    "google/realm-cc-news-pretrained-embedder",
    "google/realm-cc-news-pretrained-encoder",
    "google/realm-cc-news-pretrained-scorer",
    "google/realm-cc-news-pretrained-openqa",
    "google/realm-orqa-nq-openqa",
    "google/realm-orqa-nq-reader",
    "google/realm-orqa-wq-openqa",
    "google/realm-orqa-wq-reader",
]
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->Realm
class RealmEmbeddings(qc.Module):
    """Sums word, position and token-type embeddings, then LayerNorm + dropout.

    Attribute names match the BERT-style checkpoint layout and must not
    be renamed.
    """

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = qc.Embed(config.s_vocab, config.d_model, padding_idx=config.PAD)
        self.position_embeddings = qc.Embed(config.n_pos, config.d_model)
        self.token_type_embeddings = qc.Embed(config.n_typ, config.d_model)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)
        self.pos_type = getattr(config, "pos_type", "absolute")
        # position_ids (1, n_pos) is contiguous in memory and exported when
        # serialized; token_type_ids is a non-persistent all-zeros default.
        self.register_buffer("position_ids", torch.arange(config.n_pos).expand((1, -1)))
        self.register_buffer(
            "token_type_ids",
            torch.zeros(self.position_ids.size(), dtype=torch.long),
            persistent=False,
        )

    def forward(
        self,
        input_ids=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        past_key_values_length=0,
    ):
        # Derive (batch, seq) from whichever of the two inputs was given.
        shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1]
        n = shape[1]
        if position_ids is None:
            # Offset by the cached prefix length when decoding incrementally.
            position_ids = self.position_ids[:, past_key_values_length : n + past_key_values_length]
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                # Broadcast the registered all-zeros buffer across the batch.
                token_type_ids = self.token_type_ids[:, :n].expand(shape[0], n)
            else:
                token_type_ids = torch.zeros(shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        y = inputs_embeds + self.token_type_embeddings(token_type_ids)
        if self.pos_type == "absolute":
            y = y + self.position_embeddings(position_ids)
        return self.drop(self.norm(y))
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Realm
class RealmSelfAttention(qc.Module):
    """Multi-head scaled dot-product attention (BERT-style, Bert->Realm copy).

    Supports self- and cross-attention, key/value caching when
    config.is_decoder is set, and three position schemes:
    "absolute" (default, handled in the embeddings), "relative_key"
    and "relative_key_query" (handled here via distance embeddings).
    """

    def __init__(self, config, pos_type=None):
        super().__init__()
        # d_model must split evenly across heads unless an explicit
        # embedding size is configured separately.
        if config.d_model % config.n_heads != 0 and not hasattr(config, "d_embed"):
            raise ValueError(
                f"The hidden size ({config.d_model}) is not a multiple of the number of attention "
                f"heads ({config.n_heads})"
            )
        self.n_heads = config.n_heads
        # Per-head width; all_head_size equals d_model in the common case.
        self.attention_head_size = int(config.d_model / config.n_heads)
        self.all_head_size = self.n_heads * self.attention_head_size
        self.query = qc.Linear(config.d_model, self.all_head_size)
        self.key = qc.Linear(config.d_model, self.all_head_size)
        self.value = qc.Linear(config.d_model, self.all_head_size)
        self.drop = qc.Dropout(config.drop_attn)
        # Per-layer override first, then config, then "absolute".
        self.pos_type = pos_type or getattr(config, "pos_type", "absolute")
        if self.pos_type == "relative_key" or self.pos_type == "relative_key_query":
            self.n_pos = config.n_pos
            # One embedding per signed key-query distance in [-(n_pos-1), n_pos-1].
            self.distance_embedding = qc.Embed(2 * config.n_pos - 1, self.attention_head_size)
        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x):
        # (batch, seq, all_head_size) -> (batch, n_heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.n_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        """Returns (context[, attention_probs][, present_key_value])."""
        mixed_query_layer = self.query(hiddens)
        # Cross-attention iff encoder states are supplied.
        is_cross_attention = enc_hiddens is not None
        if is_cross_attention and past_key_value is not None:
            # Encoder K/V never change across decoding steps: reuse the cache.
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            # First cross-attention step: project the encoder states.
            key_layer = self.transpose_for_scores(self.key(enc_hiddens))
            value_layer = self.transpose_for_scores(self.value(enc_hiddens))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            # Incremental self-attention: append new K/V to the cache along seq dim.
            key_layer = self.transpose_for_scores(self.key(hiddens))
            value_layer = self.transpose_for_scores(self.value(hiddens))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            # Plain self-attention, no cache.
            key_layer = self.transpose_for_scores(self.key(hiddens))
            value_layer = self.transpose_for_scores(self.value(hiddens))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        if self.is_decoder:
            # Expose K/V for reuse on the next decoding step.
            past_key_value = (key_layer, value_layer)
        # Raw scores: (batch, heads, q_len, k_len).
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.pos_type == "relative_key" or self.pos_type == "relative_key_query":
            seq_length = hiddens.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hiddens.device).view(
                -1, 1
            )
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hiddens.device).view(
                1, -1
            )
            # Signed distance matrix, shifted by n_pos-1 to index the embedding.
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.n_pos - 1)
            positional_embedding = positional_embedding.to(
                dtype=query_layer.dtype
            )  # fp16 compatibility
            if self.pos_type == "relative_key":
                relative_position_scores = torch.einsum(
                    "bhld,lrd->bhlr", query_layer, positional_embedding
                )
                attention_scores = attention_scores + relative_position_scores
            elif self.pos_type == "relative_key_query":
                # Both query- and key-conditioned relative terms are added.
                relative_position_scores_query = torch.einsum(
                    "bhld,lrd->bhlr", query_layer, positional_embedding
                )
                relative_position_scores_key = torch.einsum(
                    "bhrd,lrd->bhlr", key_layer, positional_embedding
                )
                attention_scores = (
                    attention_scores + relative_position_scores_query + relative_position_scores_key
                )
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in RealmModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = F.softmax(attention_scores, dim=-1)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.drop(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        # Back to (batch, seq, all_head_size).
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Realm
class RealmSelfOutput(qc.Module):
    """Projects attention output back to d_model, then dropout and
    residual LayerNorm against the attention input."""

    def __init__(self, config):
        super().__init__()
        self.dense = qc.Linear(config.d_model, config.d_model)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)

    def forward(self, hiddens, input_tensor):
        y = self.drop(self.dense(hiddens))
        return self.norm(y + input_tensor)
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Realm
class Attention(qc.Module):
    """Self-attention followed by its output projection (residual + norm)."""

    def __init__(self, config, pos_type=None):
        super().__init__()
        self.self = RealmSelfAttention(config, pos_type=pos_type)
        self.output = RealmSelfOutput(config)

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        ys = self.self(
            hiddens,
            attention_mask,
            head_mask,
            enc_hiddens,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        # ys[0] is the context; keep any attn probs / cache entries that follow.
        return (self.output(ys[0], hiddens),) + ys[1:]
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Realm
class RealmIntermediate(qc.Module):
    """First MLP half: expand d_model -> d_ff and apply the activation."""

    def __init__(self, cfg):
        super().__init__()
        self.dense = qc.Linear(cfg.d_model, cfg.d_ff)
        self.act = qu.activation(cfg.act)

    def forward(self, x):
        return self.act(self.dense(x))
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Realm
class RealmOutput(qc.Module):
    """Second MLP half: project d_ff -> d_model, then dropout and
    residual LayerNorm against the MLP input."""

    def __init__(self, config):
        super().__init__()
        self.dense = qc.Linear(config.d_ff, config.d_model)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)

    def forward(self, hiddens, input_tensor):
        y = self.drop(self.dense(hiddens))
        return self.norm(y + input_tensor)
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Realm
class Layer(qc.Module):
    """One transformer layer: self-attention, optional cross-attention
    (decoder only), then a chunked feed-forward block."""

    def __init__(self, config):
        super().__init__()
        # Feed-forward is applied in chunks along the sequence dim to bound memory.
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = Attention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(
                    f"{self} should be used as a decoder model if cross attention is added"
                )
            self.crossattention = Attention(config, pos_type="absolute")
        self.intermediate = RealmIntermediate(config)
        self.output = RealmOutput(config)

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hiddens,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]
        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attns if we output attention weights
        cross_attn_present_key_value = None
        if self.is_decoder and enc_hiddens is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `enc_hiddens` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
                )
            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                enc_hiddens,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = (
                outputs + cross_attention_outputs[1:-1]
            )  # add cross attns if we output attention weights
            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value
        # Feed-forward applied chunk-wise over the sequence dimension (dim 1).
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk,
            self.chunk_size_feed_forward,
            self.seq_len_dim,
            attention_output,
        )
        outputs = (layer_output,) + outputs
        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)
        return outputs

    def feed_forward_chunk(self, attention_output):
        # Intermediate expansion followed by output projection + residual.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Realm
class Encoder(qc.Module):
    """Stack of `config.n_lays` transformer layers with optional key/value
    caching, gradient checkpointing, and hidden-state/attention collection."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([Layer(config) for _ in range(config.n_lays)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        caches=None,
        y_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        # Accumulators are only allocated when the caller asked for them.
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        next_decoder_cache = () if y_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hiddens,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = caches[i] if caches is not None else None
            if self.gradient_checkpointing and self.training:
                # Checkpointing recomputes activations during backward, which
                # is incompatible with returning a key/value cache.
                if y_cache:
                    log.warning(
                        "`y_cache=True` is incompatible with gradient checkpointing. Setting `y_cache=False`..."
                    )
                    y_cache = False

                def create_custom_forward(module):
                    # Bind cache and flags via closure so checkpoint() only
                    # sees tensor positional inputs.
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hiddens,
                    attention_mask,
                    layer_head_mask,
                    enc_hiddens,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hiddens,
                    attention_mask,
                    layer_head_mask,
                    enc_hiddens,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            hiddens = layer_outputs[0]
            if y_cache:
                # Each layer returns its present key/value cache last.
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hiddens,)
        if not return_dict:
            return tuple(
                v
                for v in [
                    hiddens,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return qo.CachesCrosses(
            y=hiddens,
            caches=next_decoder_cache,
            hiddens=all_hidden_states,
            attns=all_self_attentions,
            crosses=all_cross_attentions,
        )
@dataclass
class RealmEmbedderOutput(ModelOutput):
    """Output of RealmEmbedder: the projected retriever-space score plus the
    optional hidden states and attentions from the backbone.

    Fields must carry type annotations: without them `@dataclass` treats the
    names as plain class attributes (not fields), so keyword construction
    such as `RealmEmbedderOutput(projected_score=...)` raises TypeError.
    """

    # [batch_size, retriever_proj_size] embedding in retriever space.
    projected_score: torch.FloatTensor = None
    hiddens: tuple = None
    attns: tuple = None
@dataclass
class RealmScorerOutput(ModelOutput):
    """Output of RealmScorer: relevance scores plus the query and candidate
    embeddings they were computed from.

    Annotations are required so `@dataclass` registers these as fields;
    unannotated names would be ignored and keyword construction (as done in
    RealmScorer.forward) would fail with TypeError.
    """

    relevance_score: torch.FloatTensor = None  # [batch_size, num_candidates]
    query_score: torch.FloatTensor = None  # [batch_size, retriever_proj_size]
    candidate_score: torch.FloatTensor = None  # [batch_size, num_candidates, retriever_proj_size]
@dataclass
class RealmReaderOutput(ModelOutput):
    """Output of RealmReader: losses, correctness indicators, and the
    predicted block/candidate/span positions.

    Annotations are required so `@dataclass` registers these as fields;
    unannotated names would be ignored and the keyword construction in
    RealmReader.forward would fail with TypeError.
    """

    loss: torch.FloatTensor = None
    retriever_loss: torch.FloatTensor = None
    reader_loss: torch.FloatTensor = None
    retriever_correct: torch.BoolTensor = None
    reader_correct: torch.BoolTensor = None
    block_idx: torch.LongTensor = None
    candidate: torch.LongTensor = None
    start_pos: torch.LongTensor = None
    end_pos: torch.LongTensor = None
    hiddens: tuple = None
    attns: tuple = None
@dataclass
class RealmForOpenQAOutput(ModelOutput):
    """Output of open-QA: the raw reader output object and the token ids of
    the predicted answer span.

    Annotations are required so `@dataclass` registers these as fields;
    unannotated names would be ignored and the keyword construction in
    ForQA.forward would fail with TypeError.
    """

    reader_output: object = None
    predicted_answer_ids: torch.LongTensor = None
class RealmPredictionHeadTransform(qc.Module):
    """dense -> activation -> layer norm, applied before the LM decoder head."""

    def __init__(self, cfg):
        super().__init__()
        self.dense = qc.Linear(cfg.d_model, cfg.d_model)
        self.act = qu.activation(cfg.act)
        self.norm = qc.LayerNorm(cfg.d_model, eps=cfg.eps)

    def forward(self, x):
        # Same pipeline as the original, expressed as a single composition.
        return self.norm(self.act(self.dense(x)))
class RealmLMPredictionHead(qc.Module):
    """Transforms hidden states and decodes them into vocabulary logits."""

    def __init__(self, config):
        super().__init__()
        self.transform = RealmPredictionHeadTransform(config)
        self.decoder = qc.Linear(config.d_model, config.s_vocab, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.s_vocab))
        # Share the stand-alone bias parameter with the bias-free decoder so
        # the two stay in sync (e.g. when embeddings are resized).
        self.decoder.bias = self.bias

    def forward(self, x):
        return self.decoder(self.transform(x))
class RealmOnlyMLMHead(qc.Module):
    """Thin wrapper exposing only the masked-LM prediction head."""

    def __init__(self, config):
        super().__init__()
        self.predictions = RealmLMPredictionHead(config)

    def forward(self, sequence_output):
        # Delegate straight to the LM head: [.., d_model] -> [.., s_vocab].
        return self.predictions(sequence_output)
class RealmScorerProjection(qc.Module):
    """Projects pooled hidden states into the retriever embedding space."""

    def __init__(self, config):
        super().__init__()
        # NOTE(review): `predictions` is constructed but never used in
        # forward(); presumably kept for checkpoint/state-dict compatibility
        # — confirm before removing.
        self.predictions = RealmLMPredictionHead(config)
        self.dense = qc.Linear(config.d_model, config.retriever_proj_size)
        self.norm = qc.LayerNorm(config.retriever_proj_size, eps=config.eps)

    def forward(self, hiddens):
        # [.., d_model] -> [.., retriever_proj_size], then layer norm.
        hiddens = self.dense(hiddens)
        hiddens = self.norm(hiddens)
        return hiddens
class RealmReaderProjection(qc.Module):
    """Scores every candidate answer span (up to `config.max_span_width`
    tokens wide) inside each retrieved block."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        # Produces concatenated start/end projections, split in forward().
        self.dense_intermediate = qc.Linear(config.d_model, config.span_hidden_size * 2)
        self.dense_output = qc.Linear(config.span_hidden_size, 1)
        self.layer_normalization = qc.LayerNorm(
            config.span_hidden_size, eps=config.reader_layer_norm_eps
        )
        self.relu = nn.ReLU()

    def forward(self, hiddens, block_mask):
        def span_candidates(masks):
            # Enumerate all (start, end) index pairs of widths 1..max_span_width
            # and mark which spans lie fully inside the evidence block.
            _, max_sequence_len = masks.shape

            def _spans_given_width(width):
                current_starts = torch.arange(max_sequence_len - width + 1, device=masks.device)
                current_ends = torch.arange(width - 1, max_sequence_len, device=masks.device)
                return current_starts, current_ends

            starts, ends = zip(
                *(_spans_given_width(w + 1) for w in range(self.config.max_span_width))
            )
            # [num_spans]
            starts = torch.cat(starts, 0)
            ends = torch.cat(ends, 0)
            # [num_retrievals, num_spans] — a span is valid only when both its
            # start and end positions are inside the mask.
            start_masks = torch.index_select(masks, dim=-1, index=starts)
            end_masks = torch.index_select(masks, dim=-1, index=ends)
            span_masks = start_masks * end_masks
            return starts, ends, span_masks

        def mask_to_score(mask):
            # 0 where allowed, large negative where masked (softmax-safe).
            return (1.0 - mask.type(torch.float32)) * -10000.0

        # [reader_beam_size, max_sequence_len, span_hidden_size * 2]
        hiddens = self.dense_intermediate(hiddens)
        # [reader_beam_size, max_sequence_len, span_hidden_size]
        start_projection, end_projection = hiddens.chunk(2, dim=-1)
        candidate_starts, candidate_ends, candidate_mask = span_candidates(block_mask)
        # Span representation = start projection + end projection.
        candidate_start_projections = torch.index_select(
            start_projection, dim=1, index=candidate_starts
        )
        candidate_end_projections = torch.index_select(end_projection, dim=1, index=candidate_ends)
        candidate_hidden = candidate_start_projections + candidate_end_projections
        # [reader_beam_size, num_candidates, span_hidden_size]
        candidate_hidden = self.relu(candidate_hidden)
        # [reader_beam_size, num_candidates, span_hidden_size]
        candidate_hidden = self.layer_normalization(candidate_hidden)
        # [reader_beam_size, num_candidates]
        reader_logits = self.dense_output(candidate_hidden).squeeze(-1)
        # [reader_beam_size, num_candidates] — knock out invalid spans.
        reader_logits += mask_to_score(candidate_mask)
        return reader_logits, candidate_starts, candidate_ends
class Model(PreTrained):
    """Realm backbone: embeddings + transformer encoder (+ optional pooler)."""

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.embeddings = RealmEmbeddings(config)
        self.encoder = Encoder(config)
        self.pool = Pool(config) if add_pooling_layer else None

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        caches=None,
        y_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        # Resolve per-call flags against config defaults.
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Key/value caching is only meaningful for decoder configurations.
        if self.config.is_decoder:
            y_cache = y_cache if y_cache is not None else self.config.y_cache
        else:
            y_cache = False
        # Exactly one of input_ids / inputs_embeds must be given.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # past_key_values_length
        past_key_values_length = caches[0][0].shape[2] if caches is not None else 0
        if attention_mask is None:
            # Default: attend everywhere, including cached positions.
            attention_mask = torch.ones(
                ((batch_size, seq_length + past_key_values_length)), device=device
            )
        if token_type_ids is None:
            # Reuse the registered buffer when available (keeps device/dtype),
            # otherwise default to all-zeros segment ids.
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(
                    batch_size, seq_length
                )
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # Broadcast the 2D mask to the additive 4D attention-score mask.
        extended_attention_mask = self.get_extended_attention_mask(
            attention_mask, input_shape, device
        )
        # Cross-attention mask for decoder configurations with encoder states.
        if self.config.is_decoder and enc_hiddens is not None:
            encoder_batch_size, encoder_sequence_length, _ = enc_hiddens.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        head_mask = self.get_head_mask(head_mask, self.config.n_lays)
        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            enc_hiddens=enc_hiddens,
            encoder_attention_mask=encoder_extended_attention_mask,
            caches=caches,
            y_cache=y_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pool(sequence_output) if self.pool is not None else None
        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return qo.BaseWithPoolingAndCrossAttentions(
            y=sequence_output,
            pools=pooled_output,
            caches=encoder_outputs.caches,
            hiddens=encoder_outputs.hiddens,
            attns=encoder_outputs.attns,
            crosses=encoder_outputs.crosses,
        )
class RealmEmbedder(PreTrained):
    """Realm backbone + scorer projection: embeds text into the
    retriever-score space ([batch_size, retriever_proj_size])."""

    def __init__(self, config):
        super().__init__(config)
        self.realm = Model(self.config)
        self.cls = RealmScorerProjection(self.config)
        # NOTE(review): unlike RealmScorer/RealmReader, no self.post_init()
        # call here — confirm whether weight init is handled by the caller.

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        realm_outputs = self.realm(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # [batch_size, d_model] — pooled output of the backbone.
        pools = realm_outputs[1]
        # [batch_size, retriever_proj_size]
        projected_score = self.cls(pools)
        if not return_dict:
            return (projected_score,) + realm_outputs[2:4]
        else:
            return RealmEmbedderOutput(
                projected_score=projected_score,
                hiddens=realm_outputs.hiddens,
                attns=realm_outputs.attns,
            )
class RealmScorer(PreTrained):
    """Scores candidate blocks against a query: embeds both sides and takes
    the dot product in retriever space."""

    def __init__(self, config, query_embedder=None):
        super().__init__(config)
        self.embedder = RealmEmbedder(self.config)
        # Queries may share the candidate embedder or use a separate one.
        self.query_embedder = query_embedder if query_embedder is not None else self.embedder
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        candidate_input_ids=None,
        candidate_attention_mask=None,
        candidate_token_type_ids=None,
        candidate_inputs_embeds=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or input_embeds.")
        if candidate_input_ids is None and candidate_inputs_embeds is None:
            raise ValueError(
                "You have to specify either candidate_input_ids or candidate_inputs_embeds."
            )
        query_outputs = self.query_embedder(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # [batch_size * num_candidates, candidate_seq_len] — fold the candidate
        # dimension into the batch so the embedder sees 2D inputs.
        (
            flattened_input_ids,
            flattened_attention_mask,
            flattened_token_type_ids,
        ) = self._flatten_inputs(
            candidate_input_ids, candidate_attention_mask, candidate_token_type_ids
        )
        candidate_outputs = self.embedder(
            flattened_input_ids,
            attention_mask=flattened_attention_mask,
            token_type_ids=flattened_token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=candidate_inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # [batch_size, retriever_proj_size]
        query_score = query_outputs[0]
        # [batch_size * num_candidates, retriever_proj_size]
        candidate_score = candidate_outputs[0]
        # [batch_size, num_candidates, retriever_proj_size]
        candidate_score = candidate_score.view(
            -1, self.config.num_candidates, self.config.retriever_proj_size
        )
        # [batch_size, num_candidates] — batched dot product of query against
        # each of its candidates.
        relevance_score = torch.einsum("BD,BND->BN", query_score, candidate_score)
        if not return_dict:
            return relevance_score, query_score, candidate_score
        return RealmScorerOutput(
            relevance_score=relevance_score,
            query_score=query_score,
            candidate_score=candidate_score,
        )
class RealmKnowledgeAugEncoder(PreTrained):
    """Masked-LM encoder over (query, candidate) joint inputs; the MLM loss is
    marginalized over candidates weighted by their relevance scores."""

    def __init__(self, config):
        super().__init__(config)
        self.realm = Model(self.config)
        self.cls = RealmOnlyMLMHead(self.config)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        relevance_score=None,
        labels=None,
        mlm_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Fold the candidate dimension into the batch dimension.
        (
            flattened_input_ids,
            flattened_attention_mask,
            flattened_token_type_ids,
        ) = self._flatten_inputs(input_ids, attention_mask, token_type_ids)
        joint_outputs = self.realm(
            flattened_input_ids,
            attention_mask=flattened_attention_mask,
            token_type_ids=flattened_token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # [batch_size * num_candidates, joint_seq_len, d_model]
        joint_output = joint_outputs[0]
        # [batch_size * num_candidates, joint_seq_len, s_vocab]
        prediction_scores = self.cls(joint_output)
        # [batch_size, num_candidates]
        candidate_score = relevance_score
        masked_lm_loss = None
        if labels is not None:
            if candidate_score is None:
                raise ValueError(
                    "You have to specify `relevance_score` when `labels` is specified in order to compute loss."
                )
            batch_size, seq_length = labels.size()
            if mlm_mask is None:
                mlm_mask = torch.ones_like(labels, dtype=torch.float32)
            else:
                mlm_mask = mlm_mask.type(torch.float32)
            # Compute marginal log-likelihood
            loss_fct = CrossEntropyLoss(reduction="none")  # -100 index = padding token
            # [batch_size * num_candidates * joint_seq_len, s_vocab]
            mlm_logits = prediction_scores.view(-1, self.config.s_vocab)
            # [batch_size * num_candidates * joint_seq_len] — labels repeated
            # once per candidate to match the flattened logits.
            mlm_targets = labels.tile(1, self.config.num_candidates).view(-1)
            # [batch_size, num_candidates, joint_seq_len]
            masked_lm_log_prob = -loss_fct(mlm_logits, mlm_targets).view(
                batch_size, self.config.num_candidates, seq_length
            )
            # [batch_size, num_candidates, 1] — candidate prior from relevance.
            candidate_log_prob = candidate_score.log_softmax(-1).unsqueeze(-1)
            # [batch_size, num_candidates, joint_seq_len]
            joint_gold_log_prob = candidate_log_prob + masked_lm_log_prob
            # [batch_size, joint_seq_len] — marginalize over candidates.
            marginal_gold_log_probs = joint_gold_log_prob.logsumexp(1)
            # [] — mlm_mask-weighted mean, negated to get a loss.
            masked_lm_loss = -torch.nansum(
                torch.sum(marginal_gold_log_probs * mlm_mask) / torch.sum(mlm_mask)
            )
        if not return_dict:
            output = (prediction_scores,) + joint_outputs[2:4]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hiddens=joint_outputs.hiddens,
            attns=joint_outputs.attns,
        )
class RealmReader(PreTrained):
    """Reads retrieved evidence blocks and predicts an answer span, combining
    span (reader) logits with block relevance (retriever) logits."""

    _keys_to_ignore_on_load_unexpected = [r"pooler", "cls"]

    def __init__(self, config):
        super().__init__(config)
        self.n_labels = config.n_labels
        self.realm = Model(config)
        self.cls = RealmOnlyMLMHead(config)
        self.qa_outputs = RealmReaderProjection(config)
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        relevance_score=None,
        block_mask=None,
        start_positions=None,
        end_positions=None,
        has_answers=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if relevance_score is None:
            raise ValueError("You have to specify `relevance_score` to calculate logits and loss.")
        if block_mask is None:
            raise ValueError(
                "You have to specify `block_mask` to separate question block and evidence block."
            )
        if token_type_ids.size(1) < self.config.max_span_width:
            raise ValueError(
                "The input sequence length must be greater than or equal to config.max_span_width."
            )
        outputs = self.realm(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # [reader_beam_size, joint_seq_len, d_model]
        sequence_output = outputs[0]
        # [reader_beam_size, num_candidates], [num_candidates], [num_candidates]
        reader_logits, candidate_starts, candidate_ends = self.qa_outputs(
            sequence_output, block_mask[0 : self.config.reader_beam_size]
        )
        # [searcher_beam_size, 1]
        retriever_logits = torch.unsqueeze(relevance_score[0 : self.config.reader_beam_size], -1)
        # [reader_beam_size, num_candidates] — fuse span and block scores.
        reader_logits += retriever_logits
        # [] — best block (argmax over blocks of the per-block max span score).
        predicted_block_index = torch.argmax(torch.max(reader_logits, dim=1).values)
        # [] — best span candidate overall.
        predicted_candidate = torch.argmax(torch.max(reader_logits, dim=0).values)
        # [1]
        predicted_start = torch.index_select(candidate_starts, dim=0, index=predicted_candidate)
        # [1]
        predicted_end = torch.index_select(candidate_ends, dim=0, index=predicted_candidate)
        total_loss = None
        retriever_loss = None
        reader_loss = None
        retriever_correct = None
        reader_correct = None
        if start_positions is not None and end_positions is not None and has_answers is not None:

            def compute_correct_candidates(
                candidate_starts, candidate_ends, gold_starts, gold_ends
            ):
                """Compute correct span."""
                # [reader_beam_size, num_answers, num_candidates]
                is_gold_start = torch.eq(
                    torch.unsqueeze(torch.unsqueeze(candidate_starts, 0), 0),
                    torch.unsqueeze(gold_starts, -1),
                )
                is_gold_end = torch.eq(
                    torch.unsqueeze(torch.unsqueeze(candidate_ends, 0), 0),
                    torch.unsqueeze(gold_ends, -1),
                )
                # [reader_beam_size, num_candidates]
                return torch.any(torch.logical_and(is_gold_start, is_gold_end), 1)

            def marginal_log_loss(logits, is_correct):
                """Loss based on the negative marginal log-likelihood."""

                def mask_to_score(mask):
                    # 0 where correct, large negative where not (softmax-safe).
                    return (1.0 - mask.type(torch.float32)) * -10000.0

                # [] — log-prob mass on correct answers minus total mass.
                log_numerator = torch.logsumexp(logits + mask_to_score(is_correct), dim=-1)
                log_denominator = torch.logsumexp(logits, dim=-1)
                return log_denominator - log_numerator

            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            # `-1` is reserved for no answer.
            ignored_index = sequence_output.size(1)
            start_positions = start_positions.clamp(-1, ignored_index)
            end_positions = end_positions.clamp(-1, ignored_index)
            retriever_correct = has_answers
            any_retriever_correct = torch.any(retriever_correct)
            reader_correct = compute_correct_candidates(
                candidate_starts=candidate_starts,
                candidate_ends=candidate_ends,
                gold_starts=start_positions[0 : self.config.reader_beam_size],
                gold_ends=end_positions[0 : self.config.reader_beam_size],
            )
            any_reader_correct = torch.any(reader_correct)
            retriever_loss = marginal_log_loss(relevance_score, retriever_correct)
            reader_loss = marginal_log_loss(reader_logits.view(-1), reader_correct.view(-1))
            # Zero out each loss when no correct target exists for it.
            retriever_loss *= any_retriever_correct.type(torch.float32)
            reader_loss *= any_reader_correct.type(torch.float32)
            total_loss = (retriever_loss + reader_loss).mean()
        if not return_dict:
            output = (
                predicted_block_index,
                predicted_candidate,
                predicted_start,
                predicted_end,
            ) + outputs[2:]
            return (
                (
                    (total_loss, retriever_loss, reader_loss, retriever_correct, reader_correct)
                    + output
                )
                if total_loss is not None
                else output
            )
        return RealmReaderOutput(
            loss=total_loss,
            retriever_loss=retriever_loss,
            reader_loss=reader_loss,
            retriever_correct=retriever_correct,
            reader_correct=reader_correct,
            block_idx=predicted_block_index,
            candidate=predicted_candidate,
            start_pos=predicted_start,
            end_pos=predicted_end,
            hiddens=outputs.hiddens,
            attns=outputs.attns,
        )
class ForQA(PreTrained):
    """End-to-end open-domain QA: embed the question, retrieve the top blocks
    from the precomputed block embedding table, then read out an answer span."""

    def __init__(self, config, retriever=None):
        super().__init__(config)
        self.embedder = RealmEmbedder(config)
        self.reader = RealmReader(config)
        # Precomputed block embeddings, kept on CPU by default; populated
        # externally (e.g. loaded from a checkpoint).
        self.register_buffer(
            "block_emb",
            torch.zeros(()).new_empty(
                size=(config.num_block_records, config.retriever_proj_size),
                dtype=torch.float32,
                device=torch.device("cpu"),
            ),
        )
        self.retriever = retriever

    @property
    def searcher_beam_size(self):
        # Wider beam during training, narrower (reader-sized) at inference.
        if self.training:
            return self.config.searcher_beam_size
        return self.config.reader_beam_size

    def block_embedding_to(self, device):
        # Move the (large) block embedding table to the given device.
        self.block_emb = self.block_emb.to(device)

    def forward(
        self,
        input_ids,
        attention_mask=None,
        token_type_ids=None,
        answer_ids=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and input_ids.shape[0] != 1:
            raise ValueError("The batch_size of the inputs must be 1.")
        question_outputs = self.embedder(
            input_ids=input_ids,
            token_type_ids=token_type_ids,
            attention_mask=attention_mask,
            return_dict=True,
        )
        # [1, retriever_proj_size]
        question_projection = question_outputs[0]
        # Score the question against every block embedding.
        batch_scores = torch.einsum(
            "BD,QD->QB", self.block_emb, question_projection.to(self.block_emb.device)
        )
        # Top-k retrieval over all blocks.
        _, retrieved_block_ids = torch.topk(batch_scores, k=self.searcher_beam_size, dim=-1)
        retrieved_block_ids = retrieved_block_ids.squeeze()
        retrieved_block_emb = torch.index_select(self.block_emb, dim=0, index=retrieved_block_ids)
        # Retriever tokenizes question + retrieved blocks and locates answers.
        has_answers, start_pos, end_pos, concat_inputs = self.retriever(
            retrieved_block_ids.cpu(), input_ids, answer_ids, max_length=self.config.reader_seq_len
        )
        concat_inputs = concat_inputs.to(self.reader.device)
        # block_mask = evidence tokens only: not special tokens AND in the
        # second (token_type_ids == 1) segment.
        block_mask = concat_inputs.special_tokens_mask.type(torch.bool).to(
            device=self.reader.device
        )
        block_mask.logical_not_().logical_and_(concat_inputs.token_type_ids.type(torch.bool))
        if has_answers is not None:
            has_answers = torch.tensor(has_answers, dtype=torch.bool, device=self.reader.device)
            start_pos = torch.tensor(start_pos, dtype=torch.long, device=self.reader.device)
            end_pos = torch.tensor(end_pos, dtype=torch.long, device=self.reader.device)
        # [searcher_beam_size] — relevance of each retrieved block.
        retrieved_logits = torch.einsum(
            "D,BD->B", question_projection.squeeze(), retrieved_block_emb.to(self.reader.device)
        )
        reader_output = self.reader(
            input_ids=concat_inputs.input_ids[0 : self.config.reader_beam_size],
            attention_mask=concat_inputs.attention_mask[0 : self.config.reader_beam_size],
            token_type_ids=concat_inputs.token_type_ids[0 : self.config.reader_beam_size],
            relevance_score=retrieved_logits,
            block_mask=block_mask,
            has_answers=has_answers,
            start_positions=start_pos,
            end_positions=end_pos,
            return_dict=True,
        )
        # Slice the predicted answer span out of the predicted block.
        predicted_block = concat_inputs.input_ids[reader_output.block_idx]
        predicted_answer_ids = predicted_block[reader_output.start_pos : reader_output.end_pos + 1]
        if not return_dict:
            return reader_output, predicted_answer_ids
        return RealmForOpenQAOutput(
            reader_output=reader_output,
            predicted_answer_ids=predicted_answer_ids,
        )
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,476
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/models/convbert.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import math
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import functional as F
from transformers.utils import logging
from .. import core as qc
from ..core import utils as qu
from ..core import forward as qf
from ..core import output as qo
from ..core import attention as qa
from ..core.embed import Embed
from ..core.mlp import Classifier, MLP, Predictor, Pool
from ..prep.config.convbert import PreTrained
from torch.nn import CrossEntropyLoss
from ...modeling_utils import SequenceSummary
from ...pytorch_utils import (
apply_chunking_to_forward,
)
from . import bert
log = logging.get_logger(__name__)
class ForChoice(PreTrained):
    """Multiple-choice head on top of the ConvBERT encoder.

    Inputs arrive shaped (batch, num_choices, seq_len); each choice is
    flattened into the batch dimension, encoded, summarized to a pooled
    vector, and scored with a single linear unit. Logits are reshaped back
    to (batch, num_choices) so `labels` (index of the correct choice) can be
    used with cross-entropy.
    """
    def __init__(self, config):
        super().__init__(config)
        self.model = Model(config)
        # pools the per-token hidden states into one vector per sequence
        self.sequence_summary = SequenceSummary(config)
        # one score per (flattened) choice
        self.classifier = qc.Linear(config.d_model, 1)
        self.post_init()
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        # flatten (batch, choices, seq) -> (batch*choices, seq) for each input
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = (
            attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        )
        token_type_ids = (
            token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        )
        position_ids = (
            position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        )
        # embeddings keep their trailing feature dim: (batch*choices, seq, dim)
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        pooled_output = self.sequence_summary(sequence_output)
        logits = self.classifier(pooled_output)
        # back to one row of choice scores per original example
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return qo.WithLoss(
            loss=loss,
            logits=reshaped_logits,
            hiddens=outputs.hiddens,
            attns=outputs.attns,
        )
class ForMasked(PreTrained):
    """Masked-LM head: ConvBERT encoder followed by a vocabulary predictor."""

    def __init__(self, **kw):
        super().__init__(**kw)
        config = self.get_cfg(kw)
        self.model = Model(**kw)
        # predictor projects d_embed-sized states onto the vocabulary
        self.proj = Predictor(config.d_embed, **kw)

    # shared masked-LM forward from the core forward helpers
    forward = qf.forward_masked
class ForSeqClass(PreTrained):
    """Sequence-classification head over the ConvBERT encoder output."""

    def __init__(self, **kw):
        super().__init__(**kw)
        config = self.get_cfg(kw)
        self.model = Model(**kw)
        # classifier consumes the d_model-wide hidden states
        self.proj = Classifier(config.d_model, **kw)

    # shared sequence-classification forward from the core helpers
    forward = qf.forward_seq
class ForTokClass(PreTrained):
    """Token-classification head: per-token logits from the encoder states."""

    def __init__(self, **kw):
        super().__init__(**kw)
        # Keep the resolved config and pass the model width explicitly.
        # The original discarded get_cfg's return and called Classifier(**kw)
        # with no positional width — inconsistent with the sibling heads
        # (ForSeqClass passes cfg.d_model, ForQA passes cfg.d_model too).
        cfg = self.get_cfg(kw)
        self.model = Model(**kw)
        self.proj = Classifier(cfg.d_model, **kw)

    # shared token-classification forward from the core helpers
    forward = qf.forward_tok
class ForQA(PreTrained):
    """Extractive-QA head: projects hidden states to n_labels span logits."""

    def __init__(self, **kw):
        super().__init__(**kw)
        config = self.get_cfg(kw)
        self.model = Model(**kw)
        # linear map from the model width to the label logits (start/end)
        self.proj = qc.Linear(config.d_model, config.n_labels, **kw)

    # shared question-answering forward from the core helpers
    forward = qf.forward_qa
class PredictionHeadTransform(qc.Module):
    """Dense projection, activation, then LayerNorm — the standard transform
    applied to hidden states before a prediction head."""

    def __init__(self, cfg):
        super().__init__()
        self.dense = qc.Linear(cfg.d_model, cfg.d_model)
        self.act = qu.activation(cfg.act)
        self.norm = qc.LayerNorm(cfg.d_model, eps=cfg.eps)

    def forward(self, x):
        # project, apply the configured non-linearity, then normalize
        return self.norm(self.act(self.dense(x)))
class Model(PreTrained):
    """Bare ConvBERT encoder: embeddings (optionally projected from d_embed
    to d_model) followed by the stacked conv-attention encoder.

    Fix vs. original: `batch_size` and `seq_length` were only bound on the
    `input_ids` path; the `inputs_embeds`-only path later hit them in the
    buffered token_type_ids branch and raised NameError. They are now
    unpacked on that path as well.
    """
    def __init__(self, config):
        super().__init__(config)
        self.embeddings = Embed(config)
        # only project when embedding width differs from the model width
        if config.d_embed != config.d_model:
            self.embeddings_project = qc.Linear(config.d_embed, config.d_model)
        self.encoder = Encoder(config)
        self.config = config
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        # fall back to config defaults for the output switches
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            # bug fix: these were unbound on this path, breaking the
            # buffered token_type_ids branch below
            batch_size, seq_length = input_shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            # prefer the all-zeros buffer registered on the embeddings,
            # sliced to the current sequence length
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(
                    batch_size, seq_length
                )
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # broadcastable additive mask for the attention scores
        extended_attention_mask = self.get_extended_attention_mask(
            attention_mask, input_shape, device
        )
        head_mask = self.get_head_mask(head_mask, self.config.n_lays)
        hiddens = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
        )
        if hasattr(self, "embeddings_project"):
            hiddens = self.embeddings_project(hiddens)
        hiddens = self.encoder(
            hiddens,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        return hiddens
class Group(qc.Module):
    """Grouped linear layer: the feature dimension is split into n_groups
    slices, each mapped by its own (group_in_dim x group_out_dim) matrix,
    and the results are concatenated back with a shared bias."""

    def __init__(self, input_size, output_size, n_groups):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.n_groups = n_groups
        self.group_in_dim = input_size // n_groups
        self.group_out_dim = output_size // n_groups
        # one weight matrix per group
        self.weight = nn.Parameter(
            torch.empty(n_groups, self.group_in_dim, self.group_out_dim)
        )
        self.bias = nn.Parameter(torch.empty(output_size))

    def forward(self, hiddens):
        n_batch = hiddens.size(0)
        # (batch*seq, groups, in) -> (groups, batch*seq, in)
        grouped = hiddens.reshape(-1, self.n_groups, self.group_in_dim).permute(1, 0, 2)
        # batched matmul applies each group's matrix independently
        mixed = torch.matmul(grouped, self.weight).permute(1, 0, 2)
        return mixed.reshape(n_batch, -1, self.output_size) + self.bias
class Intermediate(qc.Module):
    """Feed-forward expansion d_model -> d_ff, grouped when n_groups > 1."""

    def __init__(self, cfg):
        super().__init__()
        # a single dense map when ungrouped, otherwise the grouped variant
        if cfg.n_groups == 1:
            self.dense = qc.Linear(cfg.d_model, cfg.d_ff)
        else:
            self.dense = Group(
                input_size=cfg.d_model,
                output_size=cfg.d_ff,
                n_groups=cfg.n_groups,
            )
        self.act = qu.activation(cfg.act)

    def forward(self, x):
        # expand then apply the configured activation
        return self.act(self.dense(x))
class Output(qc.Module):
    """Feed-forward contraction d_ff -> d_model with dropout, residual add
    and LayerNorm; grouped when n_groups > 1."""

    def __init__(self, config):
        super().__init__()
        if config.n_groups == 1:
            self.dense = qc.Linear(config.d_ff, config.d_model)
        else:
            self.dense = Group(
                input_size=config.d_ff,
                output_size=config.d_model,
                n_groups=config.n_groups,
            )
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)

    def forward(self, hiddens, input_tensor):
        # contract, drop, then normalize around the residual connection
        y = self.drop(self.dense(hiddens))
        return self.norm(y + input_tensor)
class Layer(qc.Module):
    """One ConvBERT encoder layer: conv/self-attention, optional
    cross-attention (decoder configs only), then the chunked feed-forward
    block with residual + LayerNorm."""
    def __init__(self, config):
        super().__init__()
        # chunking the feed-forward pass trades memory for compute
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = Attention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            # cross-attention is only valid when configured as a decoder
            assert self.is_decoder
            self.crossattention = Attention(config)
        self.intermediate = Intermediate(config)
        self.output = Output(config)
    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        output_attentions=False,
    ):
        # self-attention over the layer input
        self_attention_outputs = self.attention(
            hiddens,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # attention probs, if requested
        if self.is_decoder and enc_hiddens is not None:
            if not hasattr(self, "crossattention"):
                raise AttributeError(
                    f"If `enc_hiddens` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
                )
            # attend over encoder states; the encoder's mask takes the
            # attention_mask slot of Attention.forward here
            cross_attention_outputs = self.crossattention(
                attention_output,
                encoder_attention_mask,
                head_mask,
                enc_hiddens,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:]
        # feed-forward applied in chunks along the sequence dimension
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk,
            self.chunk_size_feed_forward,
            self.seq_len_dim,
            attention_output,
        )
        outputs = (layer_output,) + outputs
        return outputs
    def feed_forward_chunk(self, attention_output):
        # expansion + contraction with residual, run once per chunk
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class Encoder(qc.Module):
    """Stack of n_lays ConvBERT layers with optional gradient checkpointing
    and collection of per-layer hidden states / attention maps."""
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([Layer(config) for _ in range(config.n_lays)])
        self.gradient_checkpointing = False
    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        # accumulators stay None when their outputs are not requested
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # record the INPUT to each layer; the final state is added after the loop
                all_hidden_states = all_hidden_states + (hiddens,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            if self.gradient_checkpointing and self.training:
                # closure binds output_attentions since checkpoint() only
                # forwards tensor positional args
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hiddens,
                    attention_mask,
                    layer_head_mask,
                    enc_hiddens,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hiddens,
                    attention_mask,
                    layer_head_mask,
                    enc_hiddens,
                    encoder_attention_mask,
                    output_attentions,
                )
            hiddens = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hiddens,)
        if not return_dict:
            # tuple form drops any accumulator that was never populated
            return tuple(
                v
                for v in [
                    hiddens,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return qo.BaseWithCrossAttentions(
            y=hiddens,
            hiddens=all_hidden_states,
            attns=all_self_attentions,
            crosses=all_cross_attentions,
        )
class Attention(qc.Module):
    """Pairs SelfAttention with its residual output projection."""

    def __init__(self, config):
        super().__init__()
        self.self = SelfAttention(config)
        self.output = SelfOutput(config)

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        enc_hiddens=None,
        output_attentions=False,
    ):
        ys = self.self(
            hiddens,
            attention_mask,
            head_mask,
            enc_hiddens,
            output_attentions,
        )
        # residual projection of the context; forward any attention probs
        return (self.output(ys[0], hiddens),) + ys[1:]
class SelfAttention(qc.Module):
    """ConvBERT mixed attention: n_heads // head_ratio classic attention
    heads run alongside a span-based dynamic-convolution branch, and the two
    contexts are concatenated on the head dimension.

    The convolution branch derives a per-position, per-head kernel of width
    conv_kernel_size from elementwise query * conv-key products, then applies
    it to unfolded windows of the value projection.
    """
    def __init__(self, config):
        super().__init__()
        if config.d_model % config.n_heads != 0 and not hasattr(config, "d_embed"):
            raise ValueError(
                f"The hidden size ({config.d_model}) is not a multiple of the number of attention "
                f"heads ({config.n_heads})"
            )
        # reduce the head count by head_ratio, never dropping below one head
        new_num_attention_heads = config.n_heads // config.head_ratio
        if new_num_attention_heads < 1:
            self.head_ratio = config.n_heads
            self.n_heads = 1
        else:
            self.n_heads = new_num_attention_heads
            self.head_ratio = config.head_ratio
        self.conv_kernel_size = config.conv_kernel_size
        if config.d_model % self.n_heads != 0:
            raise ValueError("d_model should be divisible by n_heads")
        # head size uses the ORIGINAL head count, so the attention branch
        # spans all_head_size = d_model / head_ratio features
        self.attention_head_size = config.d_model // config.n_heads
        self.all_head_size = self.n_heads * self.attention_head_size
        self.query = qc.Linear(config.d_model, self.all_head_size)
        self.key = qc.Linear(config.d_model, self.all_head_size)
        self.value = qc.Linear(config.d_model, self.all_head_size)
        # depthwise-separable conv producing the "conv key" per position
        self.key_conv_attn_layer = Conv1D(
            config, config.d_model, self.all_head_size, self.conv_kernel_size
        )
        # maps query*conv_key features to one kernel weight per head/offset
        self.conv_kernel_layer = qc.Linear(self.all_head_size, self.n_heads * self.conv_kernel_size)
        self.conv_out_layer = qc.Linear(config.d_model, self.all_head_size)
        # NOTE(review): self.unfold appears unused — forward calls F.unfold
        # directly with the same kernel/padding; confirm before removing
        self.unfold = nn.Unfold(
            kernel_size=[self.conv_kernel_size, 1],
            padding=[int((self.conv_kernel_size - 1) / 2), 0],
        )
        self.drop = qc.Dropout(config.drop_attn)
    def transpose_for_scores(self, x):
        # (batch, seq, all_head) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.n_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)
    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        enc_hiddens=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(hiddens)
        batch_size = hiddens.size(0)
        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        if enc_hiddens is not None:
            mixed_key_layer = self.key(enc_hiddens)
            mixed_value_layer = self.value(enc_hiddens)
        else:
            mixed_key_layer = self.key(hiddens)
            mixed_value_layer = self.value(hiddens)
        # conv key: Conv1d expects channels-first, hence the transposes
        mixed_key_conv_attn_layer = self.key_conv_attn_layer(hiddens.transpose(1, 2))
        mixed_key_conv_attn_layer = mixed_key_conv_attn_layer.transpose(1, 2)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # per-position kernel weights, softmax-normalized over the window
        conv_attn_layer = torch.multiply(mixed_key_conv_attn_layer, mixed_query_layer)
        conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer)
        conv_kernel_layer = torch.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1])
        conv_kernel_layer = torch.softmax(conv_kernel_layer, dim=1)
        # gather sliding windows of the value projection via unfold
        conv_out_layer = self.conv_out_layer(hiddens)
        conv_out_layer = torch.reshape(conv_out_layer, [batch_size, -1, self.all_head_size])
        conv_out_layer = conv_out_layer.transpose(1, 2).contiguous().unsqueeze(-1)
        conv_out_layer = F.unfold(
            conv_out_layer,
            kernel_size=[self.conv_kernel_size, 1],
            dilation=1,
            padding=[(self.conv_kernel_size - 1) // 2, 0],
            stride=1,
        )
        conv_out_layer = conv_out_layer.transpose(1, 2).reshape(
            batch_size, -1, self.all_head_size, self.conv_kernel_size
        )
        conv_out_layer = torch.reshape(
            conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size]
        )
        # apply the learned kernel to each window
        conv_out_layer = torch.matmul(conv_out_layer, conv_kernel_layer)
        conv_out_layer = torch.reshape(conv_out_layer, [-1, self.all_head_size])
        # classic scaled dot-product attention branch
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # additive mask (large negatives at padded positions)
            attention_scores = attention_scores + attention_mask
        attention_probs = F.softmax(attention_scores, dim=-1)
        attention_probs = self.drop(attention_probs)
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        conv_out = torch.reshape(
            conv_out_layer, [batch_size, -1, self.n_heads, self.attention_head_size]
        )
        # concatenate attention heads and conv heads on the head dimension
        context_layer = torch.cat([context_layer, conv_out], 2)
        new_context_layer_shape = context_layer.size()[:-2] + (
            self.head_ratio * self.all_head_size,
        )
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
class SelfOutput(qc.Module):
    """Post-attention projection with dropout, residual add and LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = qc.Linear(config.d_model, config.d_model)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)

    def forward(self, hiddens, input_tensor):
        # project and drop, then normalize around the residual connection
        y = self.drop(self.dense(hiddens))
        return self.norm(y + input_tensor)
class Embed(qc.Module):
    """Sum of word, position and token-type embeddings, then LayerNorm and
    dropout.

    NOTE(review): this class shadows the ``Embed`` imported from
    ``..core.embed`` at the top of the file, so ``Model`` resolves to this
    local definition at call time — confirm that is intended.
    """

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = qc.Embed(config.s_vocab, config.d_embed, padding_idx=config.PAD)
        self.position_embeddings = qc.Embed(config.n_pos, config.d_embed)
        self.token_type_embeddings = qc.Embed(config.n_typ, config.d_embed)
        self.norm = qc.LayerNorm(config.d_embed, eps=config.eps)
        self.drop = qc.Dropout(config.drop)
        # fixed position index row (1, n_pos), sliced per sequence length
        self.register_buffer("position_ids", torch.arange(config.n_pos).expand((1, -1)))
        # all-zeros token types, not saved with the state dict
        self.register_buffer(
            "token_type_ids",
            torch.zeros(self.position_ids.size(), dtype=torch.long),
            persistent=False,
        )

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1]
        n_seq = shape[1]
        if position_ids is None:
            position_ids = self.position_ids[:, :n_seq]
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                # slice the registered zeros buffer and broadcast to the batch
                token_type_ids = self.token_type_ids[:, :n_seq].expand(shape[0], n_seq)
            else:
                token_type_ids = torch.zeros(
                    shape, dtype=torch.long, device=self.position_ids.device
                )
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        y = (
            inputs_embeds
            + self.position_embeddings(position_ids)
            + self.token_type_embeddings(token_type_ids)
        )
        return self.drop(self.norm(y))
class Conv1D(qc.Module):
    """Depthwise-separable 1D convolution: a depthwise conv, a 1x1 pointwise conv, plus bias."""

    def __init__(self, config, input_filters, output_filters, kernel_size, **kw):
        super().__init__()
        self.depthwise = qc.Conv1d(
            input_filters,
            input_filters,
            kernel_size=kernel_size,
            groups=input_filters,  # one filter per channel -> depthwise
            padding=kernel_size // 2,
            bias=False,
        )
        self.pointwise = qc.Conv1d(input_filters, output_filters, kernel_size=1, bias=False)
        self.bias = nn.Parameter(torch.zeros(output_filters, 1))
        # Both conv weights are initialized from N(0, init_range).
        for w in (self.depthwise.weight, self.pointwise.weight):
            w.data.normal_(mean=0.0, std=config.init_range)

    def forward(self, hiddens):
        y = self.pointwise(self.depthwise(hiddens))
        return y + self.bias
# Published checkpoint identifiers associated with this ConvBERT-style code
# (presumably Hugging Face Hub model names — verify against the loader that
# consumes this list).
LIST = [
    "YituTech/conv-bert-base",
    "YituTech/conv-bert-medium-small",
    "YituTech/conv-bert-small",
]
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,477
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/test/unit/operators/test_inductor.py
|
import torch
import triton
import triton.language as tl
def test_normalization_with_remat():
    """Exercise an Inductor-generated fused normalization kernel.

    The jitted kernel normalizes each element of `in_out_ptr0` with per-channel
    statistics (`in_ptr0`/`in_ptr1`) and affine parameters (`in_ptr2`/`in_ptr3`),
    writes the result back in place, and stores each row's mean of the normalized
    values into `in_out_ptr1`. The final assert checks that the mean of those
    per-row means equals the mean of the whole normalized buffer.
    """

    @triton.jit
    def triton_(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
        # Problem sizes are hard-coded, overriding the launcher-supplied args.
        xnumel = 512
        rnumel = 4096
        xoffset = tl.program_id(0) * XBLOCK
        xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
        xmask = xindex < xnumel
        rbase = tl.arange(0, RBLOCK)[None, :]
        x3 = xindex
        x0 = xindex % 64  # channel index into the per-channel parameter vectors
        tmp1 = tl.load(in_ptr0 + (x0), xmask)   # per-channel value subtracted below (presumably mean)
        tmp3 = tl.load(in_ptr1 + (x0), xmask)   # per-channel value under sqrt (presumably variance)
        tmp11 = tl.load(in_ptr2 + (x0), xmask)  # per-channel scale
        tmp13 = tl.load(in_ptr3 + (x0), xmask)  # per-channel shift
        _tmp17 = tl.zeros([XBLOCK, RBLOCK], tl.float32) + 0  # masked running sum
        for roffset in range(0, rnumel, RBLOCK):
            rindex = roffset + rbase
            rmask = rindex < rnumel
            r2 = rindex
            tmp0 = tl.load(in_out_ptr0 + (r2 + (4096 * x3)), rmask & xmask, eviction_policy='evict_last', other=0)
            tmp2 = tmp0 - tmp1
            tmp4 = 1e-05  # eps added before the sqrt
            tmp5 = tmp3 + tmp4
            tmp6 = tl.sqrt(tmp5)
            tmp7 = 1 / tmp6
            tmp8 = 1.0
            tmp9 = tmp7 * tmp8
            tmp10 = tmp2 * tmp9   # (x - tmp1) / sqrt(tmp3 + eps)
            tmp12 = tmp10 * tmp11
            tmp14 = tmp12 + tmp13
            # Accumulate only in-bounds lanes; out-of-bounds lanes keep old value.
            _tmp17 = tl.where(rmask & xmask, _tmp17 + tmp14, _tmp17)
            # Write the normalized value back in place.
            tl.store(in_out_ptr0 + (r2 + (4096 * x3) + tl.zeros([XBLOCK, RBLOCK], tl.int32)), tmp14, rmask & xmask)
        tmp17 = tl.sum(_tmp17, 1)[:, None]
        tmp18 = 4096.0
        tmp19 = tmp17 / tmp18  # per-row mean of the normalized values
        tl.store(in_out_ptr1 + (x3 + tl.zeros([XBLOCK, 1], tl.int32)), tmp19, xmask)

    torch.manual_seed(123)
    buf14 = torch.rand(8, 64, 64, 64, device="cuda")
    buf16 = torch.rand(8, 1, 64, device="cuda")
    arg114_1 = torch.rand(64, device="cuda")
    arg115_1 = torch.rand(64, device="cuda")
    arg8_1 = torch.rand(64, device="cuda")
    arg9_1 = torch.rand(64, device="cuda")
    # Launch grid of 512 programs; trailing args are xnumel, rnumel, XBLOCK, RBLOCK.
    triton_[(512,)](buf14, buf16, arg114_1, arg115_1, arg8_1, arg9_1, 512, 4096, 1, 2048)
    # Mean of per-row means must equal the mean of the whole normalized buffer.
    torch.testing.assert_allclose(buf16.mean().item(), buf14.mean().item(), atol=1e-7, rtol=0)
def test_avg_pool_bw():
    """Exercise an Inductor-generated 3x3 average-pool backward kernel.

    The jitted kernel gathers, for each input cell of an 8x8 spatial grid, the
    upstream gradients of all 3x3 windows (stride 1, padding 1 — inferred from
    the -1/+2 window bounds and the clamp to [0, 8]) that cover it, dividing
    each contribution by 9. With an all-ones upstream gradient, interior cells
    receive 9/9 = 1, edge cells 6/9, and corner cells 4/9, which the reference
    tensor below encodes.
    """

    @triton.jit
    def triton_(in_ptr0, out_ptr0, XBLOCK: tl.constexpr):
        xoffset = tl.program_id(0) * XBLOCK
        xindex = xoffset + tl.arange(0, XBLOCK)[:]
        x1 = (xindex // 8) % 8  # spatial row in the 8x8 grid
        x0 = xindex % 8         # spatial column
        x2 = (xindex // 64)     # flattened (batch, channel) plane index
        x5 = xindex
        tmp0 = (-1) + x1  # window start row (may be out of bounds)
        tmp1 = (-1) + x0  # window start column
        tmp2 = 2 + x1     # window end row (exclusive)
        tmp3 = 2 + x0     # window end column (exclusive)
        tmp4 = 0
        # NaN-propagating max(·, 0): clamp window start to the grid.
        tmp5 = tl.where(tmp0 != tmp0, tmp0, tl.where(tmp0 > tmp4, tmp0, tmp4))
        tmp6 = tl.where(tmp1 != tmp1, tmp1, tl.where(tmp1 > tmp4, tmp1, tmp4))
        tmp7 = 8
        # NaN-propagating min(·, 8): clamp window end to the grid.
        tmp8 = tl.where(tmp2 != tmp2, tmp2, tl.where(tmp2 < tmp7, tmp2, tmp7))
        tmp9 = tl.where(tmp3 != tmp3, tmp3, tl.where(tmp3 < tmp7, tmp3, tmp7))
        tmp10 = tmp5 + tmp4
        tmp11 = tmp6 + tmp4
        tmp12 = 1
        tmp13 = tmp8 - tmp12  # last valid row index in the clamped window
        # Clamp gather indices to stay in bounds (min with the last valid index).
        tmp14 = tl.where(tmp10 != tmp10, tmp10, tl.where(tmp10 < tmp13, tmp10, tmp13))
        tmp15 = tmp9 - tmp12  # last valid column index
        tmp16 = tl.where(tmp11 != tmp11, tmp11, tl.where(tmp11 < tmp15, tmp11, tmp15))
        # First window contribution: gradient / 9, kept only when in range.
        tmp17 = tl.load(in_ptr0 + (tmp16 + (8 * tmp14) + (64 * x2)), None).to(tl.float32)
        tmp18 = tmp17 / 9
        tmp19 = tmp10 < tmp8
        tmp20 = tmp11 < tmp9
        tmp21 = tmp19 & tmp20
        tmp22 = 0.0
        tmp23 = tl.where(tmp21, tmp18, tmp22)
        # Remaining 8 window positions: same pattern, shifting column then row.
        tmp24 = tmp6 + tmp12
        tmp25 = tl.where(tmp24 != tmp24, tmp24, tl.where(tmp24 < tmp15, tmp24, tmp15))
        tmp26 = tl.load(in_ptr0 + (tmp25 + (8 * tmp14) + (64 * x2)), None).to(tl.float32)
        tmp27 = tmp26 / 9
        tmp28 = tmp24 < tmp9
        tmp29 = tmp19 & tmp28
        tmp30 = tmp23 + tmp27
        tmp31 = tl.where(tmp29, tmp30, tmp23)
        tmp32 = 2
        tmp33 = tmp6 + tmp32
        tmp34 = tl.where(tmp33 != tmp33, tmp33, tl.where(tmp33 < tmp15, tmp33, tmp15))
        tmp35 = tl.load(in_ptr0 + (tmp34 + (8 * tmp14) + (64 * x2)), None).to(tl.float32)
        tmp36 = tmp35 / 9
        tmp37 = tmp33 < tmp9
        tmp38 = tmp19 & tmp37
        tmp39 = tmp31 + tmp36
        tmp40 = tl.where(tmp38, tmp39, tmp31)
        tmp41 = tmp5 + tmp12
        tmp42 = tl.where(tmp41 != tmp41, tmp41, tl.where(tmp41 < tmp13, tmp41, tmp13))
        tmp43 = tl.load(in_ptr0 + (tmp16 + (8 * tmp42) + (64 * x2)), None).to(tl.float32)
        tmp44 = tmp43 / 9
        tmp45 = tmp41 < tmp8
        tmp46 = tmp45 & tmp20
        tmp47 = tmp40 + tmp44
        tmp48 = tl.where(tmp46, tmp47, tmp40)
        tmp49 = tl.load(in_ptr0 + (tmp25 + (8 * tmp42) + (64 * x2)), None).to(tl.float32)
        tmp50 = tmp49 / 9
        tmp51 = tmp45 & tmp28
        tmp52 = tmp48 + tmp50
        tmp53 = tl.where(tmp51, tmp52, tmp48)
        tmp54 = tl.load(in_ptr0 + (tmp34 + (8 * tmp42) + (64 * x2)), None).to(tl.float32)
        tmp55 = tmp54 / 9
        tmp56 = tmp45 & tmp37
        tmp57 = tmp53 + tmp55
        tmp58 = tl.where(tmp56, tmp57, tmp53)
        tmp59 = tmp5 + tmp32
        tmp60 = tl.where(tmp59 != tmp59, tmp59, tl.where(tmp59 < tmp13, tmp59, tmp13))
        tmp61 = tl.load(in_ptr0 + (tmp16 + (8 * tmp60) + (64 * x2)), None).to(tl.float32)
        tmp62 = tmp61 / 9
        tmp63 = tmp59 < tmp8
        tmp64 = tmp63 & tmp20
        tmp65 = tmp58 + tmp62
        tmp66 = tl.where(tmp64, tmp65, tmp58)
        tmp67 = tl.load(in_ptr0 + (tmp25 + (8 * tmp60) + (64 * x2)), None).to(tl.float32)
        tmp68 = tmp67 / 9
        tmp69 = tmp63 & tmp28
        tmp70 = tmp66 + tmp68
        tmp71 = tl.where(tmp69, tmp70, tmp66)
        tmp72 = tl.load(in_ptr0 + (tmp34 + (8 * tmp60) + (64 * x2)), None).to(tl.float32)
        tmp73 = tmp72 / 9
        tmp74 = tmp63 & tmp37
        tmp75 = tmp71 + tmp73
        tmp76 = tl.where(tmp74, tmp75, tmp71)
        tl.store(out_ptr0 + (x5 + tl.zeros([XBLOCK], tl.int32)), tmp76, None)

    inp = torch.ones(8, 2048, 8, 8, device="cuda", dtype=torch.half)
    out = torch.ones_like(inp) * 3  # pre-filled with garbage to prove every cell is written
    numel = inp.numel()
    triton_[(numel // 1024,)](inp, out, 1024)
    # Reference: interior = 1, edges = 6/9, corners = 4/9 (ones upstream gradient).
    out_ref = torch.ones_like(inp)
    out_ref[:, :, 1:7, 0::7] = 2 / 3
    out_ref[:, :, 0::7, 1:7] = 2 / 3
    out_ref[:, :, 0::7, 0::7] = 4 / 9
    torch.testing.assert_allclose(out, out_ref)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,478
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/models/data2vec.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import math

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from transformers.modeling_utils import apply_chunking_to_forward
from transformers.utils import logging

from .. import core as qc
from ..core import utils as qu
from ..core import forward as qf
from ..core import output as qo
from ..core import attention as qa
from ..core.embed import Embed
from ..core.mlp import Classifier, MLP, Predictor, Pool
from ..prep.config.data2vec import PreTrained
log = logging.get_logger(__name__)
class ForCausal(PreTrained):
    """Data2VecText encoder with a causal language-modeling head.

    Wraps the bare `Model` (no pooling layer) and a `Predictor` LM head;
    `forward` computes shifted next-token cross-entropy when `labels` are
    given. Returns a tuple when `return_dict` is falsy, else a structured
    output carrying loss/logits/caches/hiddens/attns/crosses.
    """

    _keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
    _keys_to_ignore_on_load_missing = [
        r"position_ids",
        r"lm_head.decoder.weight",
        r"lm_head.decoder.bias",
    ]
    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        if not config.is_decoder:
            log.warning("If you want to use `Model` as a standalone, add `is_decoder=True.`")
        self.data2vec_text = Model(config, add_pooling_layer=False)
        self.lm_head = Predictor(config)
        self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        labels=None,
        caches=None,
        y_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            # Caching is pointless (and memory-hungry) when training with labels.
            y_cache = False
        outputs = self.data2vec_text(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            enc_hiddens=enc_hiddens,
            encoder_attention_mask=encoder_attention_mask,
            caches=caches,
            y_cache=y_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.lm_head(sequence_output)
        lm_loss = None
        if labels is not None:
            # Shift so that tokens < n are used to predict token n.
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(
                shifted_prediction_scores.view(-1, self.config.s_vocab), labels.view(-1)
            )
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output
        # Fix: the original referenced `CausalLMOutputWithCrossAttentions`,
        # which is neither imported nor defined in this module (NameError at
        # runtime); its kwargs (caches/hiddens/attns/crosses) also do not match
        # the HF class, so the intended object is the project's output type.
        # NOTE(review): assumes `qo.LossCrosses` exists with these fields —
        # confirm against ..core.output.
        return qo.LossCrosses(
            loss=lm_loss,
            logits=prediction_scores,
            caches=outputs.caches,
            hiddens=outputs.hiddens,
            attns=outputs.attns,
            crosses=outputs.crosses,
        )
class ForChoice(PreTrained):
    """Multiple-choice head: each (batch, choice) pair is encoded
    independently by folding the choice axis into the batch axis, and the
    pooled representation of each choice is scored with a 1-unit linear
    classifier."""

    def __init__(self, config):
        super().__init__(config)
        self.data2vec_text = Model(config)
        self.drop = qc.Dropout(config.drop)
        self.classifier = qc.Linear(config.d_model, 1)
        self.post_init()

    def forward(
        self,
        input_ids=None,
        token_type_ids=None,
        attention_mask=None,
        labels=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        if return_dict is None:
            return_dict = self.config.use_return_dict
        # Number of candidate answers per example.
        n_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        def squash(t):
            # Fold the choice dimension into the batch dimension.
            return None if t is None else t.view(-1, t.size(-1))

        flat_embeds = (
            None
            if inputs_embeds is None
            else inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
        )
        ys = self.data2vec_text(
            squash(input_ids),
            position_ids=squash(position_ids),
            token_type_ids=squash(token_type_ids),
            attention_mask=squash(attention_mask),
            head_mask=head_mask,
            inputs_embeds=flat_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # ys[1] is the pooled ([CLS]-style) output of the base model.
        scores = self.classifier(self.drop(ys[1])).view(-1, n_choices)
        loss = None
        if labels is not None:
            loss = CrossEntropyLoss()(scores, labels)
        if not return_dict:
            out = (scores,) + ys[2:]
            return out if loss is None else ((loss,) + out)
        return qo.WithLoss(
            loss=loss,
            logits=scores,
            hiddens=ys.hiddens,
            attns=ys.attns,
        )
class ForMasked(PreTrained):
    """Masked-LM objective: the base encoder topped by a vocabulary
    `Predictor` head; the forward pass is the shared `qf.forward_masked`."""

    def __init__(self, **kw):
        super().__init__(**kw)
        config = self.get_cfg(kw)
        self.model = Model(**kw)
        self.proj = Predictor(config.d_embed, **kw)

    forward = qf.forward_masked
class ForSeqClass(PreTrained):
    """Sequence classification: pooling-free encoder plus a tanh
    `Classifier`; the forward pass is the shared `qf.forward_seq`."""

    def __init__(self, **kw):
        super().__init__(**kw)
        config = self.get_cfg(kw)
        self.model = Model(add_pool=False, **kw)
        self.proj = Classifier(config.d_model, "tanh", **kw)

    forward = qf.forward_seq
class ForTokClass(PreTrained):
    """Per-token classification head over the base encoder; the forward
    pass is the shared `qf.forward_tok`."""

    def __init__(self, **kw):
        super().__init__(**kw)
        # get_cfg is kept for its side effects on kw even though the
        # returned config is not needed here.
        self.get_cfg(kw)
        self.model = Model(**kw)
        self.proj = Classifier(**kw)

    forward = qf.forward_tok
class ForQA(PreTrained):
    """Extractive question answering: pooling-free encoder plus a linear
    span-logit projection; the forward pass is the shared `qf.forward_qa`."""

    def __init__(self, **kw):
        super().__init__(**kw)
        config = self.get_cfg(kw)
        self.model = Model(add_pool=False, **kw)
        self.proj = qc.Linear(config.d_model, config.n_labels, **kw)

    forward = qf.forward_qa
class Model(PreTrained):
    """Bare Data2VecText encoder: embeddings -> transformer `Encoder`,
    with an optional pooling layer over the first token."""
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.embeddings = Embed(config)
        self.encoder = Encoder(config)
        # Pooler is optional so heads that only need token states can skip it.
        self.pool = Pool(config) if add_pooling_layer else None
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        caches=None,
        y_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Encode a batch.

        Exactly one of `input_ids` / `inputs_embeds` must be given.
        Returns `(sequence_output, pooled_output, ...)` when `return_dict`
        is falsy, else a structured output with caches/hiddens/attns/crosses.
        """
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Key/value caching only applies to decoder (autoregressive) use.
        if self.config.is_decoder:
            y_cache = y_cache if y_cache is not None else self.config.y_cache
        else:
            y_cache = False
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # past_key_values_length
        past_key_values_length = caches[0][0].shape[2] if caches is not None else 0
        if attention_mask is None:
            # Default: attend everywhere, including previously cached positions.
            attention_mask = torch.ones(
                ((batch_size, seq_length + past_key_values_length)), device=device
            )
        if token_type_ids is None:
            if hasattr(self.embeddings, "token_type_ids"):
                # Reuse the registered buffer so dtype/device match the model.
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(
                    batch_size, seq_length
                )
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # Broadcastable additive mask for self-attention (inherited helper).
        extended_attention_mask = self.get_extended_attention_mask(
            attention_mask, input_shape, device
        )
        # Cross-attention mask is only built when running as a decoder over
        # encoder hidden states.
        if self.config.is_decoder and enc_hiddens is not None:
            encoder_batch_size, encoder_sequence_length, _ = enc_hiddens.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        head_mask = self.get_head_mask(head_mask, self.config.n_lays)
        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            enc_hiddens=enc_hiddens,
            encoder_attention_mask=encoder_extended_attention_mask,
            caches=caches,
            y_cache=y_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pool(sequence_output) if self.pool is not None else None
        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return qo.BaseWithPoolingAndCrossAttentions(
            y=sequence_output,
            pools=pooled_output,
            caches=encoder_outputs.caches,
            hiddens=encoder_outputs.hiddens,
            attns=encoder_outputs.attns,
            crosses=encoder_outputs.crosses,
        )
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Data2VecText
class Layer(qc.Module):
    """One transformer layer: self-attention, optional cross-attention
    (decoder only), then the chunked feed-forward block."""
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = Attention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(
                    f"{self} should be used as a decoder model if cross attention is added"
                )
            self.crossattention = Attention(config, pos_type="absolute")
        self.intermediate = Intermediate(config)
        self.output = Output(config)
    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # past_key_value layout: first two entries are the self-attention
        # key/value cache, last two (when present) the cross-attention cache.
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hiddens,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]
        if self.is_decoder:
            # Decoder: the last element of the attention outputs is the
            # freshly computed key/value cache.
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]
        cross_attn_present_key_value = None
        if self.is_decoder and enc_hiddens is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `enc_hiddens` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
                )
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                enc_hiddens,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]
            cross_attn_present_key_value = cross_attention_outputs[-1]
            # Merge self- and cross-attention caches into one 4-tuple.
            present_key_value = present_key_value + cross_attn_present_key_value
        # NOTE(review): `apply_chunking_to_forward` is neither defined nor
        # imported in this module as written — verify it is in scope (it lives
        # in transformers.modeling_utils).
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk,
            self.chunk_size_feed_forward,
            self.seq_len_dim,
            attention_output,
        )
        outputs = (layer_output,) + outputs
        if self.is_decoder:
            outputs = outputs + (present_key_value,)
        return outputs
    def feed_forward_chunk(self, attention_output):
        # FFN applied (possibly per chunk along the sequence dim) after attention.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class Intermediate(qc.Module):
    """Position-wise feed-forward expansion: Linear(d_model -> d_ff)
    followed by the configured activation."""

    def __init__(self, cfg):
        super().__init__()
        self.dense = qc.Linear(cfg.d_model, cfg.d_ff)
        self.act = qu.activation(cfg.act)

    def forward(self, x):
        return self.act(self.dense(x))
# Copied from transformers.models.bert.modeling_bert.BertOutput
class Output(qc.Module):
def __init__(self, config):
super().__init__()
self.dense = qc.Linear(config.d_ff, config.d_model)
self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
self.drop = qc.Dropout(config.drop)
def forward(self, hiddens, input_tensor):
hiddens = self.dense(hiddens)
hiddens = self.drop(hiddens)
hiddens = self.norm(hiddens + input_tensor)
return hiddens
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Data2VecText
class Encoder(qc.Module):
    """Stack of `n_lays` transformer `Layer`s with optional gradient
    checkpointing and key/value caching."""
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([Layer(config) for _ in range(config.n_lays)])
        self.gradient_checkpointing = False
    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        caches=None,
        y_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        # Accumulators stay None when the corresponding flag is off.
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        next_decoder_cache = () if y_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # Record the input to this layer; the final state is appended
                # after the loop.
                all_hidden_states = all_hidden_states + (hiddens,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = caches[i] if caches is not None else None
            if self.gradient_checkpointing and self.training:
                if y_cache:
                    log.warning(
                        "`y_cache=True` is incompatible with gradient checkpointing. Setting `y_cache=False`..."
                    )
                    y_cache = False
                # Closure so checkpoint() only receives tensor args; the cache
                # and flag are captured from the enclosing scope.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hiddens,
                    attention_mask,
                    layer_head_mask,
                    enc_hiddens,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hiddens,
                    attention_mask,
                    layer_head_mask,
                    enc_hiddens,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            hiddens = layer_outputs[0]
            if y_cache:
                # The layer appends its present key/value as the last output.
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hiddens,)
        if not return_dict:
            # Drop None entries to preserve the positional tuple contract.
            return tuple(
                v
                for v in [
                    hiddens,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return qo.CachesCrosses(
            y=hiddens,
            caches=next_decoder_cache,
            hiddens=all_hidden_states,
            attns=all_self_attentions,
            crosses=all_cross_attentions,
        )
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Data2VecText
class Attention(qc.Module):
def __init__(self, config, pos_type=None):
super().__init__()
self.self = SelfAttention(config, pos_type=pos_type)
self.output = SelfOutput(config)
def forward(
self,
hiddens,
attention_mask=None,
head_mask=None,
enc_hiddens=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hiddens,
attention_mask,
head_mask,
enc_hiddens,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hiddens)
outputs = (attention_output,) + self_outputs[1:]
return outputs
# Copied from transformers.models.roberta.modeling_roberta.RobertaSelfAttention with Roberta->Data2VecText
class SelfAttention(qc.Module):
    """Multi-head scaled dot-product self/cross attention.

    Projects `hiddens` into per-head query/key/value tensors, optionally
    attends over encoder states (cross-attention) and/or cached key/value
    pairs (incremental decoding), and supports "absolute" (default),
    "relative_key" or "relative_key_query" position scoring.
    """

    def __init__(self, config, pos_type=None):
        super().__init__()
        # d_model must split evenly across heads unless an explicit
        # embedding size (d_embed) is configured.
        if config.d_model % config.n_heads != 0 and not hasattr(config, "d_embed"):
            raise ValueError(
                f"The hidden size ({config.d_model}) is not a multiple of the number of attention "
                f"heads ({config.n_heads})"
            )
        self.n_heads = config.n_heads
        self.attention_head_size = int(config.d_model / config.n_heads)
        self.all_head_size = self.n_heads * self.attention_head_size
        self.query = qc.Linear(config.d_model, self.all_head_size)
        self.key = qc.Linear(config.d_model, self.all_head_size)
        self.value = qc.Linear(config.d_model, self.all_head_size)
        self.drop = qc.Dropout(config.drop_attn)
        self.pos_type = pos_type or getattr(config, "pos_type", "absolute")
        if self.pos_type == "relative_key" or self.pos_type == "relative_key_query":
            self.n_pos = config.n_pos
            # One learned embedding per possible signed (query - key) distance.
            self.distance_embedding = qc.Embed(2 * config.n_pos - 1, self.attention_head_size)
        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x):
        """Reshape (..., all_head_size) into (batch, n_heads, seq, head_size)."""
        new_x_shape = x.size()[:-1] + (self.n_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        """Compute attention over `hiddens` (or `enc_hiddens` for cross-attention).

        Returns (context,) plus optionally the attention probabilities
        (when `output_attentions`) and the updated key/value cache
        (when configured as a decoder).
        """
        mixed_query_layer = self.query(hiddens)
        is_cross_attention = enc_hiddens is not None
        # Four cases: cached cross-attn, fresh cross-attn, cached self-attn,
        # fresh self-attn — each yields the key/value tensors to attend over.
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, crosses
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(enc_hiddens))
            value_layer = self.transpose_for_scores(self.value(enc_hiddens))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hiddens))
            value_layer = self.transpose_for_scores(self.value(hiddens))
            # Prepend cached keys/values along the sequence axis.
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hiddens))
            value_layer = self.transpose_for_scores(self.value(hiddens))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        if self.is_decoder:
            past_key_value = (key_layer, value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.pos_type == "relative_key" or self.pos_type == "relative_key_query":
            seq_length = hiddens.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hiddens.device).view(
                -1, 1
            )
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hiddens.device).view(
                1, -1
            )
            distance = position_ids_l - position_ids_r
            # Shift the signed distance into [0, 2*n_pos-2] to index the table.
            positional_embedding = self.distance_embedding(distance + self.n_pos - 1)
            positional_embedding = positional_embedding.to(
                dtype=query_layer.dtype
            )  # fp16 compatibility
            if self.pos_type == "relative_key":
                relative_position_scores = torch.einsum(
                    "bhld,lrd->bhlr", query_layer, positional_embedding
                )
                attention_scores = attention_scores + relative_position_scores
            elif self.pos_type == "relative_key_query":
                relative_position_scores_query = torch.einsum(
                    "bhld,lrd->bhlr", query_layer, positional_embedding
                )
                relative_position_scores_key = torch.einsum(
                    "bhrd,lrd->bhlr", key_layer, positional_embedding
                )
                attention_scores = (
                    attention_scores + relative_position_scores_query + relative_position_scores_key
                )
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in Data2VecTextModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = F.softmax(attention_scores, dim=-1)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.drop(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        # Merge heads back: (batch, heads, seq, head) -> (batch, seq, all_head_size).
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class SelfOutput(qc.Module):
    """Post-attention projection: dense -> dropout -> residual add -> LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = qc.Linear(config.d_model, config.d_model)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)

    def forward(self, hiddens, input_tensor):
        """Project `hiddens`, apply dropout, add residual `input_tensor`, normalize."""
        projected = self.drop(self.dense(hiddens))
        return self.norm(projected + input_tensor)
# Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->Data2VecText
class Embed(qc.Module):
    """Token + position + token-type embeddings with LayerNorm and dropout.

    Position ids are derived from the input: pad positions keep
    `padding_idx`, non-pad tokens are numbered from `padding_idx + 1`
    (RoBERTa-style), shifted by any cached decoding length.
    """

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = qc.Embed(config.s_vocab, config.d_model, padding_idx=config.PAD)
        self.token_type_embeddings = qc.Embed(config.n_typ, config.d_model)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)
        self.pos_type = getattr(config, "pos_type", "absolute")
        self.register_buffer("position_ids", torch.arange(config.n_pos).expand((1, -1)))
        # Zero token-type ids buffer so callers may omit token_type_ids.
        self.register_buffer(
            "token_type_ids",
            torch.zeros(self.position_ids.size(), dtype=torch.long),
            persistent=False,
        )
        self.padding_idx = config.PAD
        # Single position-embedding table. (The original built a throwaway
        # table without padding_idx first and immediately overwrote it.)
        self.position_embeddings = qc.Embed(
            config.n_pos, config.d_model, padding_idx=self.padding_idx
        )

    def forward(
        self,
        input_ids=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        past_key_values_length=0,
    ):
        """Embed ids (or pre-computed `inputs_embeds`) into hidden states.

        Exactly one of `input_ids` / `inputs_embeds` must be provided;
        missing `position_ids` / `token_type_ids` are synthesized.
        """
        if position_ids is None:
            if input_ids is not None:
                position_ids = create_position_ids_from_input_ids(
                    input_ids, self.padding_idx, past_key_values_length
                )
            else:
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        seq_length = input_shape[1]
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                # Slice the registered all-zeros buffer to the batch shape.
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(
                    input_shape[0], seq_length
                )
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(
                    input_shape, dtype=torch.long, device=self.position_ids.device
                )
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + token_type_embeddings
        if self.pos_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.norm(embeddings)
        embeddings = self.drop(embeddings)
        return embeddings

    def create_position_ids_from_inputs_embeds(self, inputs_embeds):
        """Sequential position ids starting at padding_idx + 1 (no pads to skip,
        since embeddings carry no token identity)."""
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]
        position_ids = torch.arange(
            self.padding_idx + 1,
            sequence_length + self.padding_idx + 1,
            dtype=torch.long,
            device=inputs_embeds.device,
        )
        return position_ids.unsqueeze(0).expand(input_shape)
# Pretrained checkpoint identifiers associated with this model
# (Hugging Face hub-style names).
LIST = [
    "facebook/data2vec-text-base",
]
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    """Build RoBERTa-style position ids from token ids.

    Pad tokens stay at `padding_idx`; real tokens are numbered from
    `padding_idx + 1` onward (offset by `past_key_values_length` when
    decoding with a cache).
    """
    not_pad = input_ids.ne(padding_idx).int()
    running = torch.cumsum(not_pad, dim=1).type_as(not_pad)
    # Zero out pad positions after shifting, then re-anchor at padding_idx.
    positions = (running + past_key_values_length) * not_pad
    return positions.long() + padding_idx
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,479
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/triton/language/math.py
|
import functools
import os
from . import core
@functools.lru_cache()
def libdevice_path():
    """Resolve the libdevice bitcode path, honoring TRITON_LIBDEVICE_PATH.

    Returns the bundled CUDA libdevice.10.bc by default; on ROCm (HIP)
    builds of torch the default is the empty string.
    """
    import torch

    here = os.path.dirname(os.path.abspath(__file__))
    default = (
        os.path.join(here, "..", "third_party", "cuda", "lib", "libdevice.10.bc")
        if torch.version.hip is None
        else ''
    )
    return os.getenv("TRITON_LIBDEVICE_PATH", default)
@core.extern
def clz(arg0, _builder=None):
    """Count leading zero bits via libdevice __nv_clz / __nv_clzll."""
    table = {
        (core.dtype("int32"),): ("__nv_clz", core.dtype("int32")),
        (core.dtype("int64"),): ("__nv_clzll", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], table, is_pure=True, _builder=_builder
    )
@core.extern
def popc(arg0, _builder=None):
    """Population count (set bits) via libdevice __nv_popc / __nv_popcll."""
    table = {
        (core.dtype("int32"),): ("__nv_popc", core.dtype("int32")),
        (core.dtype("int64"),): ("__nv_popcll", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], table, is_pure=True, _builder=_builder
    )
@core.extern
def byte_perm(arg0, arg1, arg2, _builder=None):
    """Byte permutation via libdevice __nv_byte_perm."""
    table = {
        (core.dtype("int32"), core.dtype("int32"), core.dtype("int32")): (
            "__nv_byte_perm",
            core.dtype("int32"),
        ),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1, arg2], table, is_pure=True, _builder=_builder
    )
@core.extern
def min(arg0, arg1, _builder=None):
    """Element-wise minimum via the libdevice __nv_*min family."""
    table = {
        (core.dtype("int32"), core.dtype("int32")): ("__nv_min", core.dtype("int32")),
        (core.dtype("uint32"), core.dtype("uint32")): ("__nv_umin", core.dtype("uint32")),
        (core.dtype("int64"), core.dtype("int64")): ("__nv_llmin", core.dtype("int64")),
        (core.dtype("uint64"), core.dtype("uint64")): ("__nv_ullmin", core.dtype("uint64")),
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fminf", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_fmin", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], table, is_pure=True, _builder=_builder
    )
@core.extern
def max(arg0, arg1, _builder=None):
    """Element-wise maximum via the libdevice __nv_*max family."""
    table = {
        (core.dtype("int32"), core.dtype("int32")): ("__nv_max", core.dtype("int32")),
        (core.dtype("uint32"), core.dtype("uint32")): ("__nv_umax", core.dtype("uint32")),
        (core.dtype("int64"), core.dtype("int64")): ("__nv_llmax", core.dtype("int64")),
        (core.dtype("uint64"), core.dtype("uint64")): ("__nv_ullmax", core.dtype("uint64")),
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmaxf", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_fmax", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], table, is_pure=True, _builder=_builder
    )
@core.extern
def mulhi(arg0, arg1, _builder=None):
    """High bits of the product via libdevice __nv_mulhi / __nv_mul64hi variants."""
    table = {
        (core.dtype("int32"), core.dtype("int32")): ("__nv_mulhi", core.dtype("int32")),
        (core.dtype("uint32"), core.dtype("uint32")): ("__nv_umulhi", core.dtype("uint32")),
        (core.dtype("int64"), core.dtype("int64")): ("__nv_mul64hi", core.dtype("int64")),
        (core.dtype("uint64"), core.dtype("uint64")): ("__nv_umul64hi", core.dtype("uint64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], table, is_pure=True, _builder=_builder
    )
@core.extern
def mul24(arg0, arg1, _builder=None):
    """24-bit multiply via libdevice __nv_mul24 / __nv_umul24."""
    table = {
        (core.dtype("int32"), core.dtype("int32")): ("__nv_mul24", core.dtype("int32")),
        (core.dtype("uint32"), core.dtype("uint32")): ("__nv_umul24", core.dtype("uint32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], table, is_pure=True, _builder=_builder
    )
@core.extern
def brev(arg0, _builder=None):
    """Bit reversal via libdevice __nv_brev / __nv_brevll."""
    table = {
        (core.dtype("int32"),): ("__nv_brev", core.dtype("int32")),
        (core.dtype("int64"),): ("__nv_brevll", core.dtype("int64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], table, is_pure=True, _builder=_builder
    )
@core.extern
def sad(arg0, arg1, arg2, _builder=None):
    """Sum of absolute differences via libdevice __nv_sad / __nv_usad."""
    table = {
        (core.dtype("int32"), core.dtype("int32"), core.dtype("uint32")): (
            "__nv_sad",
            core.dtype("int32"),
        ),
        (core.dtype("uint32"), core.dtype("uint32"), core.dtype("uint32")): (
            "__nv_usad",
            core.dtype("uint32"),
        ),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1, arg2], table, is_pure=True, _builder=_builder
    )
@core.extern
def abs(arg0, _builder=None):
    """Absolute value via libdevice __nv_abs / __nv_llabs / __nv_fabsf / __nv_fabs."""
    table = {
        (core.dtype("int32"),): ("__nv_abs", core.dtype("int32")),
        (core.dtype("int64"),): ("__nv_llabs", core.dtype("int64")),
        (core.dtype("fp32"),): ("__nv_fabsf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_fabs", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], table, is_pure=True, _builder=_builder
    )
@core.extern
def floor(arg0, _builder=None):
    """Round toward negative infinity via libdevice __nv_floorf / __nv_floor."""
    table = {
        (core.dtype("fp32"),): ("__nv_floorf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_floor", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], table, is_pure=True, _builder=_builder
    )
@core.extern
def rcp64h(arg0, _builder=None):
    """High-half double reciprocal via libdevice __nv_rcp64h."""
    table = {
        (core.dtype("fp64"),): ("__nv_rcp64h", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], table, is_pure=True, _builder=_builder
    )
@core.extern
def rsqrt(arg0, _builder=None):
    """Reciprocal square root via libdevice __nv_rsqrtf / __nv_rsqrt."""
    table = {
        (core.dtype("fp32"),): ("__nv_rsqrtf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_rsqrt", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], table, is_pure=True, _builder=_builder
    )
@core.extern
def ceil(arg0, _builder=None):
return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
{(core.dtype("fp64"),): ("__nv_ceil", core.dtype("fp64")),
(core.dtype("fp32"),): ("__nv_ceilf", core.dtype("fp32")),
}, is_pure=True, _builder=_builder)
@core.extern
def trunc(arg0, _builder=None):
return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
{(core.dtype("fp64"),): ("__nv_trunc", core.dtype("fp64")),
(core.dtype("fp32"),): ("__nv_truncf", core.dtype("fp32")),
}, is_pure=True, _builder=_builder)
@core.extern
def exp2(arg0, _builder=None):
    """Base-2 exponential, 2**arg0."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_exp2f", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_exp2", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def saturatef(arg0, _builder=None):
    """Clamp an fp32 value to the interval [0, 1]."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_saturatef", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def fma_rn(arg0, arg1, arg2, _builder=None):
    """Fused multiply-add arg0*arg1 + arg2, round-to-nearest-even."""
    dispatch = {
        (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmaf_rn", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_fma_rn", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1, arg2], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def fma_rz(arg0, arg1, arg2, _builder=None):
    """Fused multiply-add arg0*arg1 + arg2, round toward zero."""
    dispatch = {
        (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmaf_rz", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_fma_rz", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1, arg2], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def fma_rd(arg0, arg1, arg2, _builder=None):
    """Fused multiply-add arg0*arg1 + arg2, round toward negative infinity."""
    dispatch = {
        (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmaf_rd", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_fma_rd", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1, arg2], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def fma_ru(arg0, arg1, arg2, _builder=None):
    """Fused multiply-add arg0*arg1 + arg2, round toward positive infinity."""
    dispatch = {
        (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmaf_ru", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_fma_ru", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1, arg2], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def fast_dividef(arg0, arg1, _builder=None):
    """Fast approximate fp32 division arg0 / arg1."""
    dispatch = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fast_fdividef", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)
@core.extern
def div_rn(arg0, arg1, _builder=None):
    """IEEE division arg0 / arg1, round-to-nearest-even."""
    dispatch = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fdiv_rn", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_ddiv_rn", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def div_rz(arg0, arg1, _builder=None):
    """IEEE division arg0 / arg1, round toward zero."""
    dispatch = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fdiv_rz", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_ddiv_rz", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def div_rd(arg0, arg1, _builder=None):
    """IEEE division arg0 / arg1, round toward negative infinity."""
    dispatch = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fdiv_rd", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_ddiv_rd", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def div_ru(arg0, arg1, _builder=None):
    """IEEE division arg0 / arg1, round toward positive infinity."""
    dispatch = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fdiv_ru", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_ddiv_ru", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def rcp_rn(arg0, _builder=None):
    """Reciprocal 1 / arg0, round-to-nearest-even."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_frcp_rn", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_drcp_rn", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def rcp_rz(arg0, _builder=None):
    """Reciprocal 1 / arg0, round toward zero."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_frcp_rz", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_drcp_rz", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def rcp_rd(arg0, _builder=None):
    """Reciprocal 1 / arg0, round toward negative infinity."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_frcp_rd", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_drcp_rd", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def rcp_ru(arg0, _builder=None):
    """Reciprocal 1 / arg0, round toward positive infinity."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_frcp_ru", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_drcp_ru", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def sqrt_rn(arg0, _builder=None):
    """Square root, round-to-nearest-even."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_fsqrt_rn", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_dsqrt_rn", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def sqrt_rz(arg0, _builder=None):
    """Square root, round toward zero."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_fsqrt_rz", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_dsqrt_rz", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def sqrt_rd(arg0, _builder=None):
    """Square root, round toward negative infinity."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_fsqrt_rd", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_dsqrt_rd", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def sqrt_ru(arg0, _builder=None):
    """Square root, round toward positive infinity."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_fsqrt_ru", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_dsqrt_ru", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def sqrt(arg0, _builder=None):
    """Square root (__nv_sqrtf / __nv_sqrt)."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_sqrtf", core.dtype("fp32")),
        (core.dtype("fp64"),): ("__nv_sqrt", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)
@core.extern
def add_rn(arg0, arg1, _builder=None):
    """Addition arg0 + arg1, round-to-nearest-even."""
    dispatch = {
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dadd_rn", core.dtype("fp64")),
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fadd_rn", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def add_rz(arg0, arg1, _builder=None):
    """Addition arg0 + arg1, round toward zero."""
    dispatch = {
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dadd_rz", core.dtype("fp64")),
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fadd_rz", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def add_rd(arg0, arg1, _builder=None):
    """Addition arg0 + arg1, round toward negative infinity."""
    dispatch = {
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dadd_rd", core.dtype("fp64")),
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fadd_rd", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def add_ru(arg0, arg1, _builder=None):
    """Addition arg0 + arg1, round toward positive infinity."""
    dispatch = {
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dadd_ru", core.dtype("fp64")),
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fadd_ru", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def mul_rn(arg0, arg1, _builder=None):
    """Multiplication arg0 * arg1, round-to-nearest-even."""
    dispatch = {
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dmul_rn", core.dtype("fp64")),
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmul_rn", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def mul_rz(arg0, arg1, _builder=None):
    """Multiplication arg0 * arg1, round toward zero."""
    dispatch = {
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dmul_rz", core.dtype("fp64")),
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmul_rz", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def mul_rd(arg0, arg1, _builder=None):
    """Multiplication arg0 * arg1, round toward negative infinity."""
    dispatch = {
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dmul_rd", core.dtype("fp64")),
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmul_rd", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def mul_ru(arg0, arg1, _builder=None):
    """Multiplication arg0 * arg1, round toward positive infinity."""
    dispatch = {
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dmul_ru", core.dtype("fp64")),
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmul_ru", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)
@core.extern
def double2float_rn(arg0, _builder=None):
    """Convert fp64 to fp32, round-to-nearest-even."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2float_rn", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2float_rz(arg0, _builder=None):
    """Convert fp64 to fp32, round toward zero."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2float_rz", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2float_rd(arg0, _builder=None):
    """Convert fp64 to fp32, round toward negative infinity."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2float_rd", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2float_ru(arg0, _builder=None):
    """Convert fp64 to fp32, round toward positive infinity."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2float_ru", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2int_rn(arg0, _builder=None):
    """Convert fp64 to int32, round-to-nearest-even."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2int_rn", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2int_rz(arg0, _builder=None):
    """Convert fp64 to int32, round toward zero."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2int_rz", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2int_rd(arg0, _builder=None):
    """Convert fp64 to int32, round toward negative infinity."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2int_rd", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2int_ru(arg0, _builder=None):
    """Convert fp64 to int32, round toward positive infinity."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2int_ru", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2uint_rn(arg0, _builder=None):
    """Convert fp64 to 32-bit unsigned int, round-to-nearest-even (result dtype tagged int32 upstream)."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2uint_rn", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2uint_rz(arg0, _builder=None):
    """Convert fp64 to 32-bit unsigned int, round toward zero (result dtype tagged int32 upstream)."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2uint_rz", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2uint_rd(arg0, _builder=None):
    """Convert fp64 to 32-bit unsigned int, round toward negative infinity (result dtype tagged int32 upstream)."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2uint_rd", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2uint_ru(arg0, _builder=None):
    """Convert fp64 to 32-bit unsigned int, round toward positive infinity (result dtype tagged int32 upstream)."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2uint_ru", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def int2double_rn(arg0, _builder=None):
    """Convert int32 to fp64 (exact; rn suffix per libdevice naming)."""
    dispatch = {
        (core.dtype("int32"),): ("__nv_int2double_rn", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def uint2double_rn(arg0, _builder=None):
    """Convert uint32 to fp64 (exact; rn suffix per libdevice naming)."""
    dispatch = {
        (core.dtype("uint32"),): ("__nv_uint2double_rn", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def float2int_rn(arg0, _builder=None):
    """Convert fp32 to int32, round-to-nearest-even."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_float2int_rn", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def float2int_rz(arg0, _builder=None):
    """Convert fp32 to int32, round toward zero."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_float2int_rz", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def float2int_rd(arg0, _builder=None):
    """Convert fp32 to int32, round toward negative infinity."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_float2int_rd", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def float2int_ru(arg0, _builder=None):
    """Convert fp32 to int32, round toward positive infinity."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_float2int_ru", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def float2uint_rn(arg0, _builder=None):
    """Convert fp32 to 32-bit unsigned int, round-to-nearest-even (result dtype tagged int32 upstream)."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_float2uint_rn", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def float2uint_rz(arg0, _builder=None):
    """Convert fp32 to 32-bit unsigned int, round toward zero (result dtype tagged int32 upstream)."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_float2uint_rz", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def float2uint_rd(arg0, _builder=None):
    """Convert fp32 to 32-bit unsigned int, round toward negative infinity (result dtype tagged int32 upstream)."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_float2uint_rd", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def float2uint_ru(arg0, _builder=None):
    """Convert fp32 to 32-bit unsigned int, round toward positive infinity (result dtype tagged int32 upstream)."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_float2uint_ru", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def int2float_rn(arg0, _builder=None):
    """Convert int32 to fp32, round-to-nearest-even."""
    dispatch = {
        (core.dtype("int32"),): ("__nv_int2float_rn", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def int2float_rz(arg0, _builder=None):
    """Convert int32 to fp32, round toward zero."""
    dispatch = {
        (core.dtype("int32"),): ("__nv_int2float_rz", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def int2float_rd(arg0, _builder=None):
    """Convert int32 to fp32, round toward negative infinity."""
    dispatch = {
        (core.dtype("int32"),): ("__nv_int2float_rd", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def int2float_ru(arg0, _builder=None):
    """Convert int32 to fp32, round toward positive infinity."""
    dispatch = {
        (core.dtype("int32"),): ("__nv_int2float_ru", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def uint2float_rn(arg0, _builder=None):
    """Convert uint32 to fp32, round-to-nearest-even."""
    dispatch = {
        (core.dtype("uint32"),): ("__nv_uint2float_rn", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def uint2float_rz(arg0, _builder=None):
    """Convert uint32 to fp32, round toward zero."""
    dispatch = {
        (core.dtype("uint32"),): ("__nv_uint2float_rz", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def uint2float_rd(arg0, _builder=None):
    """Convert uint32 to fp32, round toward negative infinity."""
    dispatch = {
        (core.dtype("uint32"),): ("__nv_uint2float_rd", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def uint2float_ru(arg0, _builder=None):
    """Convert uint32 to fp32, round toward positive infinity."""
    dispatch = {
        (core.dtype("uint32"),): ("__nv_uint2float_ru", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def hiloint2double(arg0, arg1, _builder=None):
    """Assemble an fp64 from two 32-bit halves (arg0 = high word, arg1 = low word, per __nv_hiloint2double)."""
    dispatch = {
        (core.dtype("int32"), core.dtype("int32")): ("__nv_hiloint2double", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2loint(arg0, _builder=None):
    """Extract the low 32 bits of an fp64's bit pattern."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2loint", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2hiint(arg0, _builder=None):
    """Extract the high 32 bits of an fp64's bit pattern."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2hiint", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)
@core.extern
def float2ll_rn(arg0, _builder=None):
    """Convert fp32 to int64, round-to-nearest-even."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_float2ll_rn", core.dtype("int64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def float2ll_rz(arg0, _builder=None):
    """Convert fp32 to int64, round toward zero."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_float2ll_rz", core.dtype("int64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def float2ll_rd(arg0, _builder=None):
    """Convert fp32 to int64, round toward negative infinity."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_float2ll_rd", core.dtype("int64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def float2ll_ru(arg0, _builder=None):
    """Convert fp32 to int64, round toward positive infinity."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_float2ll_ru", core.dtype("int64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def float2ull_rn(arg0, _builder=None):
    """Convert fp32 to 64-bit unsigned int, round-to-nearest-even (result dtype tagged int64 upstream)."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_float2ull_rn", core.dtype("int64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def float2ull_rz(arg0, _builder=None):
    """Convert fp32 to 64-bit unsigned int, round toward zero (result dtype tagged int64 upstream)."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_float2ull_rz", core.dtype("int64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def float2ull_rd(arg0, _builder=None):
    """Convert fp32 to 64-bit unsigned int, round toward negative infinity (result dtype tagged int64 upstream)."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_float2ull_rd", core.dtype("int64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def float2ull_ru(arg0, _builder=None):
    """Convert fp32 to 64-bit unsigned int, round toward positive infinity (result dtype tagged int64 upstream)."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_float2ull_ru", core.dtype("int64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2ll_rn(arg0, _builder=None):
    """Convert fp64 to int64, round-to-nearest-even."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2ll_rn", core.dtype("int64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2ll_rz(arg0, _builder=None):
    """Convert fp64 to int64, round toward zero."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2ll_rz", core.dtype("int64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2ll_rd(arg0, _builder=None):
    """Convert fp64 to int64, round toward negative infinity."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2ll_rd", core.dtype("int64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2ll_ru(arg0, _builder=None):
    """Convert fp64 to int64, round toward positive infinity."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2ll_ru", core.dtype("int64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2ull_rn(arg0, _builder=None):
    """Convert fp64 to 64-bit unsigned int, round-to-nearest-even (result dtype tagged int64 upstream)."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2ull_rn", core.dtype("int64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2ull_rz(arg0, _builder=None):
    """Convert fp64 to 64-bit unsigned int, round toward zero (result dtype tagged int64 upstream)."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2ull_rz", core.dtype("int64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2ull_rd(arg0, _builder=None):
    """Convert fp64 to 64-bit unsigned int, round toward negative infinity (result dtype tagged int64 upstream)."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2ull_rd", core.dtype("int64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double2ull_ru(arg0, _builder=None):
    """Convert fp64 to 64-bit unsigned int, round toward positive infinity (result dtype tagged int64 upstream)."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double2ull_ru", core.dtype("int64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)
@core.extern
def ll2float_rn(arg0, _builder=None):
    """Convert int64 to fp32, round-to-nearest-even."""
    dispatch = {
        (core.dtype("int64"),): ("__nv_ll2float_rn", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def ll2float_rz(arg0, _builder=None):
    """Convert int64 to fp32, round toward zero."""
    dispatch = {
        (core.dtype("int64"),): ("__nv_ll2float_rz", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def ll2float_rd(arg0, _builder=None):
    """Convert int64 to fp32, round toward negative infinity."""
    dispatch = {
        (core.dtype("int64"),): ("__nv_ll2float_rd", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def ll2float_ru(arg0, _builder=None):
    """Convert int64 to fp32, round toward positive infinity."""
    dispatch = {
        (core.dtype("int64"),): ("__nv_ll2float_ru", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def ull2float_rn(arg0, _builder=None):
    """Convert uint64 to fp32, round-to-nearest-even."""
    dispatch = {
        (core.dtype("uint64"),): ("__nv_ull2float_rn", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def ull2float_rz(arg0, _builder=None):
    """Convert uint64 to fp32, round toward zero."""
    dispatch = {
        (core.dtype("uint64"),): ("__nv_ull2float_rz", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def ull2float_rd(arg0, _builder=None):
    """Convert uint64 to fp32, round toward negative infinity."""
    dispatch = {
        (core.dtype("uint64"),): ("__nv_ull2float_rd", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def ull2float_ru(arg0, _builder=None):
    """Convert uint64 to fp32, round toward positive infinity."""
    dispatch = {
        (core.dtype("uint64"),): ("__nv_ull2float_ru", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def ll2double_rn(arg0, _builder=None):
    """Convert int64 to fp64, round-to-nearest-even."""
    dispatch = {
        (core.dtype("int64"),): ("__nv_ll2double_rn", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def ll2double_rz(arg0, _builder=None):
    """Convert int64 to fp64, round toward zero."""
    dispatch = {
        (core.dtype("int64"),): ("__nv_ll2double_rz", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def ll2double_rd(arg0, _builder=None):
    """Convert int64 to fp64, round toward negative infinity."""
    dispatch = {
        (core.dtype("int64"),): ("__nv_ll2double_rd", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def ll2double_ru(arg0, _builder=None):
    """Convert int64 to fp64, round toward positive infinity."""
    dispatch = {
        (core.dtype("int64"),): ("__nv_ll2double_ru", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def ull2double_rn(arg0, _builder=None):
    """Convert uint64 to fp64, round-to-nearest-even."""
    dispatch = {
        (core.dtype("uint64"),): ("__nv_ull2double_rn", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def ull2double_rz(arg0, _builder=None):
    """Convert uint64 to fp64, round toward zero."""
    dispatch = {
        (core.dtype("uint64"),): ("__nv_ull2double_rz", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def ull2double_rd(arg0, _builder=None):
    """Convert uint64 to fp64, round toward negative infinity."""
    dispatch = {
        (core.dtype("uint64"),): ("__nv_ull2double_rd", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def ull2double_ru(arg0, _builder=None):
    """Convert uint64 to fp64, round toward positive infinity."""
    dispatch = {
        (core.dtype("uint64"),): ("__nv_ull2double_ru", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)
@core.extern
def int_as_float(arg0, _builder=None):
    """Reinterpret the bits of an int32 as an fp32 (no numeric conversion)."""
    dispatch = {
        (core.dtype("int32"),): ("__nv_int_as_float", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def float_as_int(arg0, _builder=None):
    """Reinterpret the bits of an fp32 as an int32 (no numeric conversion)."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_float_as_int", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def uint_as_float(arg0, _builder=None):
    """Reinterpret the bits of a uint32 as an fp32 (no numeric conversion)."""
    dispatch = {
        (core.dtype("uint32"),): ("__nv_uint_as_float", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def float_as_uint(arg0, _builder=None):
    """Reinterpret the bits of an fp32 as a 32-bit unsigned int (result dtype tagged int32 upstream)."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_float_as_uint", core.dtype("int32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def longlong_as_double(arg0, _builder=None):
    """Reinterpret the bits of an int64 as an fp64 (no numeric conversion)."""
    dispatch = {
        (core.dtype("int64"),): ("__nv_longlong_as_double", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def double_as_longlong(arg0, _builder=None):
    """Reinterpret the bits of an fp64 as an int64 (no numeric conversion)."""
    dispatch = {
        (core.dtype("fp64"),): ("__nv_double_as_longlong", core.dtype("int64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)
@core.extern
def fast_sinf(arg0, _builder=None):
    """Fast approximate fp32 sine."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_fast_sinf", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def fast_cosf(arg0, _builder=None):
    """Fast approximate fp32 cosine."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_fast_cosf", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def fast_log2f(arg0, _builder=None):
    """Fast approximate fp32 base-2 logarithm."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_fast_log2f", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def fast_logf(arg0, _builder=None):
    """Fast approximate fp32 natural logarithm."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_fast_logf", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def fast_expf(arg0, _builder=None):
    """Fast approximate fp32 natural exponential."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_fast_expf", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def fast_tanf(arg0, _builder=None):
    """Fast approximate fp32 tangent."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_fast_tanf", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def fast_exp10f(arg0, _builder=None):
    """Fast approximate fp32 base-10 exponential."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_fast_exp10f", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def fast_log10f(arg0, _builder=None):
    """Fast approximate fp32 base-10 logarithm."""
    dispatch = {
        (core.dtype("fp32"),): ("__nv_fast_log10f", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def fast_powf(arg0, arg1, _builder=None):
    """Fast approximate fp32 power arg0 ** arg1."""
    dispatch = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fast_powf", core.dtype("fp32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)
@core.extern
def hadd(arg0, arg1, _builder=None):
    """Averaging add (arg0 + arg1) >> 1 without intermediate overflow, truncated."""
    dispatch = {
        (core.dtype("int32"), core.dtype("int32")): ("__nv_hadd", core.dtype("int32")),
        (core.dtype("uint32"), core.dtype("uint32")): ("__nv_uhadd", core.dtype("uint32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def rhadd(arg0, arg1, _builder=None):
    """Averaging add (arg0 + arg1) >> 1 without intermediate overflow, rounded up."""
    dispatch = {
        (core.dtype("int32"), core.dtype("int32")): ("__nv_rhadd", core.dtype("int32")),
        (core.dtype("uint32"), core.dtype("uint32")): ("__nv_urhadd", core.dtype("uint32")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def sub_rn(arg0, arg1, _builder=None):
    """Subtraction arg0 - arg1, round-to-nearest-even."""
    dispatch = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fsub_rn", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dsub_rn", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def sub_rz(arg0, arg1, _builder=None):
    """Subtraction arg0 - arg1, round toward zero."""
    dispatch = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fsub_rz", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dsub_rz", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def sub_rd(arg0, arg1, _builder=None):
    """Subtraction arg0 - arg1, round toward negative infinity."""
    dispatch = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fsub_rd", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dsub_rd", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)

@core.extern
def sub_ru(arg0, arg1, _builder=None):
    """Subtraction arg0 - arg1, round toward positive infinity."""
    dispatch = {
        (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fsub_ru", core.dtype("fp32")),
        (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dsub_ru", core.dtype("fp64")),
    }
    return core.extern_elementwise(
        "libdevice", libdevice_path(), [arg0, arg1], dispatch,
        is_pure=True, _builder=_builder)
@core.extern
def rsqrt_rn(arg0, _builder=None):
return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
{(core.dtype("fp32"),): ("__nv_frsqrt_rn", core.dtype("fp32")),
}, is_pure=True, _builder=_builder)
@core.extern
def ffs(arg0, _builder=None):
return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
{(core.dtype("int32"),): ("__nv_ffs", core.dtype("int32")),
(core.dtype("int64"),): ("__nv_ffsll", core.dtype("int32")),
}, is_pure=True, _builder=_builder)
@core.extern
def rint(arg0, _builder=None):
return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
{(core.dtype("fp32"),): ("__nv_rintf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_rint", core.dtype("fp64")),
}, is_pure=True, _builder=_builder)
@core.extern
def llrint(arg0, _builder=None):
return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
{(core.dtype("fp32"),): ("__nv_llrintf", core.dtype("int64")),
(core.dtype("fp64"),): ("__nv_llrint", core.dtype("int64")),
}, is_pure=True, _builder=_builder)
@core.extern
def nearbyint(arg0, _builder=None):
return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
{(core.dtype("fp32"),): ("__nv_nearbyintf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_nearbyint", core.dtype("fp64")),
}, is_pure=True, _builder=_builder)
@core.extern
def isnan(arg0, _builder=None):
return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
{(core.dtype("fp32"),): ("__nv_isnanf", core.dtype("int32")),
(core.dtype("fp64"),): ("__nv_isnand", core.dtype("int32")),
}, is_pure=True, _builder=_builder)
@core.extern
def signbit(arg0, _builder=None):
return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
{(core.dtype("fp32"),): ("__nv_signbitf", core.dtype("int32")),
(core.dtype("fp64"),): ("__nv_signbitd", core.dtype("int32")),
}, is_pure=True, _builder=_builder)
@core.extern
def copysign(arg0, arg1, _builder=None):
return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_copysignf", core.dtype("fp32")),
(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_copysign", core.dtype("fp64")),
}, is_pure=True, _builder=_builder)
@core.extern
def finitef(arg0, _builder=None):
    """Elementwise finiteness test for fp32 only, returning an int32 flag; dispatches to libdevice ``__nv_finitef``."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_finitef", core.dtype("int32")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def isinf(arg0, _builder=None):
    """Elementwise infinity test returning an int32 flag; dispatches to libdevice ``__nv_isinff`` (fp32) or ``__nv_isinfd`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_isinff", core.dtype("int32")),
                                    (core.dtype("fp64"),): ("__nv_isinfd", core.dtype("int32")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def nextafter(arg0, arg1, _builder=None):
    """Elementwise next representable value from ``arg0`` toward ``arg1``; dispatches to libdevice ``__nv_nextafterf`` (fp32) or ``__nv_nextafter`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ],
                                   {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_nextafterf", core.dtype("fp32")),
                                    (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_nextafter", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def sin(arg0, _builder=None):
    """Elementwise sine; dispatches to libdevice ``__nv_sinf`` (fp32) or ``__nv_sin`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_sinf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_sin", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def cos(arg0, _builder=None):
    """Elementwise cosine; dispatches to libdevice ``__nv_cosf`` (fp32) or ``__nv_cos`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_cosf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_cos", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def sinpi(arg0, _builder=None):
    """Elementwise sin(pi*x) (per libdevice naming); dispatches to ``__nv_sinpif`` (fp32) or ``__nv_sinpi`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_sinpif", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_sinpi", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def cospi(arg0, _builder=None):
    """Elementwise cos(pi*x) (per libdevice naming); dispatches to ``__nv_cospif`` (fp32) or ``__nv_cospi`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_cospif", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_cospi", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def tan(arg0, _builder=None):
    """Elementwise tangent; dispatches to libdevice ``__nv_tanf`` (fp32) or ``__nv_tan`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_tanf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_tan", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def log2(arg0, _builder=None):
    """Elementwise base-2 logarithm; dispatches to libdevice ``__nv_log2f`` (fp32) or ``__nv_log2`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_log2f", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_log2", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def exp(arg0, _builder=None):
    """Elementwise natural exponential; dispatches to libdevice ``__nv_expf`` (fp32) or ``__nv_exp`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_expf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_exp", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def exp10(arg0, _builder=None):
    """Elementwise base-10 exponential; dispatches to libdevice ``__nv_exp10f`` (fp32) or ``__nv_exp10`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_exp10f", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_exp10", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def cosh(arg0, _builder=None):
    """Elementwise hyperbolic cosine; dispatches to libdevice ``__nv_coshf`` (fp32) or ``__nv_cosh`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_coshf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_cosh", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def sinh(arg0, _builder=None):
    """Elementwise hyperbolic sine; dispatches to libdevice ``__nv_sinhf`` (fp32) or ``__nv_sinh`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_sinhf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_sinh", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def tanh(arg0, _builder=None):
    """Elementwise hyperbolic tangent; dispatches to libdevice ``__nv_tanhf`` (fp32) or ``__nv_tanh`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_tanhf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_tanh", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def atan2(arg0, arg1, _builder=None):
    """Elementwise two-argument arctangent; dispatches to libdevice ``__nv_atan2f`` (fp32) or ``__nv_atan2`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ],
                                   {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_atan2f", core.dtype("fp32")),
                                    (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_atan2", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def atan(arg0, _builder=None):
    """Elementwise arctangent; dispatches to libdevice ``__nv_atanf`` (fp32) or ``__nv_atan`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_atanf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_atan", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def asin(arg0, _builder=None):
    """Elementwise arcsine; dispatches to libdevice ``__nv_asinf`` (fp32) or ``__nv_asin`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_asinf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_asin", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def acos(arg0, _builder=None):
    """Elementwise arccosine; dispatches to libdevice ``__nv_acosf`` (fp32) or ``__nv_acos`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_acosf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_acos", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def log(arg0, _builder=None):
    """Elementwise natural logarithm; dispatches to libdevice ``__nv_logf`` (fp32) or ``__nv_log`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_logf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_log", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def log10(arg0, _builder=None):
    """Elementwise base-10 logarithm; dispatches to libdevice ``__nv_log10f`` (fp32) or ``__nv_log10`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_log10f", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_log10", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def log1p(arg0, _builder=None):
    """Elementwise log(1+x); dispatches to libdevice ``__nv_log1pf`` (fp32) or ``__nv_log1p`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_log1pf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_log1p", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def acosh(arg0, _builder=None):
    """Elementwise inverse hyperbolic cosine; dispatches to libdevice ``__nv_acoshf`` (fp32) or ``__nv_acosh`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_acoshf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_acosh", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def asinh(arg0, _builder=None):
    """Elementwise inverse hyperbolic sine; dispatches to libdevice ``__nv_asinhf`` (fp32) or ``__nv_asinh`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_asinhf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_asinh", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def atanh(arg0, _builder=None):
    """Elementwise inverse hyperbolic tangent; dispatches to libdevice ``__nv_atanhf`` (fp32) or ``__nv_atanh`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_atanhf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_atanh", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def expm1(arg0, _builder=None):
    """Elementwise exp(x)-1; dispatches to libdevice ``__nv_expm1f`` (fp32) or ``__nv_expm1`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_expm1f", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_expm1", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def hypot(arg0, arg1, _builder=None):
    """Elementwise hypotenuse sqrt(x^2+y^2); dispatches to libdevice ``__nv_hypotf`` (fp32) or ``__nv_hypot`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ],
                                   {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_hypotf", core.dtype("fp32")),
                                    (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_hypot", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def rhypot(arg0, arg1, _builder=None):
    """Elementwise reciprocal hypotenuse (per libdevice naming); dispatches to ``__nv_rhypotf`` (fp32) or ``__nv_rhypot`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ],
                                   {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_rhypotf", core.dtype("fp32")),
                                    (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_rhypot", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def norm3d(arg0, arg1, arg2, _builder=None):
    """Elementwise 3-D Euclidean norm (per libdevice naming); dispatches to ``__nv_norm3df`` (fp32) or ``__nv_norm3d`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2, ],
                                   {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_norm3df", core.dtype("fp32")),
                                    (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_norm3d", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def rnorm3d(arg0, arg1, arg2, _builder=None):
    """Elementwise reciprocal 3-D Euclidean norm (per libdevice naming); dispatches to ``__nv_rnorm3df`` (fp32) or ``__nv_rnorm3d`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2, ],
                                   {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_rnorm3df", core.dtype("fp32")),
                                    (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_rnorm3d", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def norm4d(arg0, arg1, arg2, arg3, _builder=None):
    """Elementwise 4-D Euclidean norm (per libdevice naming); dispatches to ``__nv_norm4df`` (fp32) or ``__nv_norm4d`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2, arg3, ],
                                   {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_norm4df", core.dtype("fp32")),
                                    (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_norm4d", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def rnorm4d(arg0, arg1, arg2, arg3, _builder=None):
    """Elementwise reciprocal 4-D Euclidean norm (per libdevice naming); dispatches to ``__nv_rnorm4df`` (fp32) or ``__nv_rnorm4d`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2, arg3, ],
                                   {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_rnorm4df", core.dtype("fp32")),
                                    (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_rnorm4d", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def cbrt(arg0, _builder=None):
    """Elementwise cube root; dispatches to libdevice ``__nv_cbrtf`` (fp32) or ``__nv_cbrt`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_cbrtf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_cbrt", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def rcbrt(arg0, _builder=None):
    """Elementwise reciprocal cube root (per libdevice naming); dispatches to ``__nv_rcbrtf`` (fp32) or ``__nv_rcbrt`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_rcbrtf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_rcbrt", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def j0(arg0, _builder=None):
    """Elementwise Bessel function of the first kind, order 0; dispatches to libdevice ``__nv_j0f`` (fp32) or ``__nv_j0`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_j0f", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_j0", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def j1(arg0, _builder=None):
    """Elementwise Bessel function of the first kind, order 1; dispatches to libdevice ``__nv_j1f`` (fp32) or ``__nv_j1`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_j1f", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_j1", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def y0(arg0, _builder=None):
    """Elementwise Bessel function of the second kind, order 0; dispatches to libdevice ``__nv_y0f`` (fp32) or ``__nv_y0`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_y0f", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_y0", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def y1(arg0, _builder=None):
    """Elementwise Bessel function of the second kind, order 1; dispatches to libdevice ``__nv_y1f`` (fp32) or ``__nv_y1`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_y1f", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_y1", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def yn(arg0, arg1, _builder=None):
    """Elementwise Bessel function of the second kind of integer order ``arg0`` (int32) at ``arg1``; dispatches to libdevice ``__nv_ynf``/``__nv_yn``."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ],
                                   {(core.dtype("int32"), core.dtype("fp32"),): ("__nv_ynf", core.dtype("fp32")),
                                    (core.dtype("int32"), core.dtype("fp64"),): ("__nv_yn", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def jn(arg0, arg1, _builder=None):
    """Elementwise Bessel function of the first kind of integer order ``arg0`` (int32) at ``arg1``; dispatches to libdevice ``__nv_jnf``/``__nv_jn``."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ],
                                   {(core.dtype("int32"), core.dtype("fp32"),): ("__nv_jnf", core.dtype("fp32")),
                                    (core.dtype("int32"), core.dtype("fp64"),): ("__nv_jn", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def cyl_bessel_i0(arg0, _builder=None):
    """Elementwise modified cylindrical Bessel function of order 0; dispatches to libdevice ``__nv_cyl_bessel_i0f`` (fp32) or ``__nv_cyl_bessel_i0`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_cyl_bessel_i0f", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_cyl_bessel_i0", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def cyl_bessel_i1(arg0, _builder=None):
    """Elementwise modified cylindrical Bessel function of order 1; dispatches to libdevice ``__nv_cyl_bessel_i1f`` (fp32) or ``__nv_cyl_bessel_i1`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_cyl_bessel_i1f", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_cyl_bessel_i1", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def erf(arg0, _builder=None):
    """Elementwise error function; dispatches to libdevice ``__nv_erff`` (fp32) or ``__nv_erf`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_erff", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_erf", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def erfinv(arg0, _builder=None):
    """Elementwise inverse error function; dispatches to libdevice ``__nv_erfinvf`` (fp32) or ``__nv_erfinv`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_erfinvf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_erfinv", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def erfc(arg0, _builder=None):
    """Elementwise complementary error function; dispatches to libdevice ``__nv_erfcf`` (fp32) or ``__nv_erfc`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_erfcf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_erfc", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def erfcx(arg0, _builder=None):
    """Elementwise scaled complementary error function (per libdevice naming); dispatches to ``__nv_erfcxf`` (fp32) or ``__nv_erfcx`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_erfcxf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_erfcx", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def erfcinv(arg0, _builder=None):
    """Elementwise inverse complementary error function; dispatches to libdevice ``__nv_erfcinvf`` (fp32) or ``__nv_erfcinv`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_erfcinvf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_erfcinv", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def normcdfinv(arg0, _builder=None):
    """Elementwise inverse standard normal CDF (per libdevice naming); dispatches to ``__nv_normcdfinvf`` (fp32) or ``__nv_normcdfinv`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_normcdfinvf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_normcdfinv", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def normcdf(arg0, _builder=None):
    """Elementwise standard normal CDF (per libdevice naming); dispatches to ``__nv_normcdff`` (fp32) or ``__nv_normcdf`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_normcdff", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_normcdf", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def lgamma(arg0, _builder=None):
    """Elementwise log-gamma; dispatches to libdevice ``__nv_lgammaf`` (fp32) or ``__nv_lgamma`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_lgammaf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_lgamma", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def ldexp(arg0, arg1, _builder=None):
    """Elementwise ldexp (scale float ``arg0`` by 2**``arg1``, int32 exponent); dispatches to libdevice ``__nv_ldexpf`` (fp32) or ``__nv_ldexp`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ],
                                   {(core.dtype("fp32"), core.dtype("int32"),): ("__nv_ldexpf", core.dtype("fp32")),
                                    (core.dtype("fp64"), core.dtype("int32"),): ("__nv_ldexp", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def scalbn(arg0, arg1, _builder=None):
    """Elementwise scalbn (float ``arg0`` scaled by radix**``arg1``, int32 exponent); dispatches to libdevice ``__nv_scalbnf`` (fp32) or ``__nv_scalbn`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ],
                                   {(core.dtype("fp32"), core.dtype("int32"),): ("__nv_scalbnf", core.dtype("fp32")),
                                    (core.dtype("fp64"), core.dtype("int32"),): ("__nv_scalbn", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def fmod(arg0, arg1, _builder=None):
    """Elementwise floating-point remainder (C ``fmod`` semantics); dispatches to libdevice ``__nv_fmodf`` (fp32) or ``__nv_fmod`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ],
                                   {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmodf", core.dtype("fp32")),
                                    (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fmod", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def remainder(arg0, arg1, _builder=None):
    """Elementwise IEEE remainder (C ``remainder`` semantics); dispatches to libdevice ``__nv_remainderf`` (fp32) or ``__nv_remainder`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ],
                                   {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_remainderf", core.dtype("fp32")),
                                    (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_remainder", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def fma(arg0, arg1, arg2, _builder=None):
    """Elementwise fused multiply-add (arg0*arg1+arg2); dispatches to libdevice ``__nv_fmaf`` (fp32) or ``__nv_fma`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2, ],
                                   {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaf", core.dtype("fp32")),
                                    (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fma", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def pow(arg0, arg1, _builder=None):
    """Elementwise power ``arg0 ** arg1``.

    Overloaded on the exponent dtype: an int32 exponent selects libdevice
    ``__nv_powif``/``__nv_powi``, a floating exponent selects
    ``__nv_powf``/``__nv_pow`` (fp32/fp64 respectively).
    """
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ],
                                   {(core.dtype("fp32"), core.dtype("int32"),): ("__nv_powif", core.dtype("fp32")),
                                    (core.dtype("fp64"), core.dtype("int32"),): ("__nv_powi", core.dtype("fp64")),
                                    (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_powf", core.dtype("fp32")),
                                    (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_pow", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def tgamma(arg0, _builder=None):
    """Elementwise gamma function; dispatches to libdevice ``__nv_tgammaf`` (fp32) or ``__nv_tgamma`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_tgammaf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_tgamma", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def round(arg0, _builder=None):
    """Elementwise round to nearest integer value (float result, C ``round`` semantics); dispatches to libdevice ``__nv_roundf`` (fp32) or ``__nv_round`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_roundf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_round", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def llround(arg0, _builder=None):
    """Elementwise round to nearest integer with int64 result; dispatches to libdevice ``__nv_llroundf`` (fp32) or ``__nv_llround`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_llroundf", core.dtype("int64")),
                                    (core.dtype("fp64"),): ("__nv_llround", core.dtype("int64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def fdim(arg0, arg1, _builder=None):
    """Elementwise positive difference (C ``fdim`` semantics); dispatches to libdevice ``__nv_fdimf`` (fp32) or ``__nv_fdim`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, ],
                                   {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fdimf", core.dtype("fp32")),
                                    (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fdim", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def ilogb(arg0, _builder=None):
    """Elementwise integer binary exponent of a float (int32 result); dispatches to libdevice ``__nv_ilogbf`` (fp32) or ``__nv_ilogb`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_ilogbf", core.dtype("int32")),
                                    (core.dtype("fp64"),): ("__nv_ilogb", core.dtype("int32")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def logb(arg0, _builder=None):
    """Elementwise binary exponent of a float (float result); dispatches to libdevice ``__nv_logbf`` (fp32) or ``__nv_logb`` (fp64)."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp32"),): ("__nv_logbf", core.dtype("fp32")),
                                    (core.dtype("fp64"),): ("__nv_logb", core.dtype("fp64")),
                                    }, is_pure=True, _builder=_builder)
@core.extern
def isfinited(arg0, _builder=None):
    """Elementwise finiteness test for fp64 only, returning an int32 flag; dispatches to libdevice ``__nv_isfinited``."""
    return core.extern_elementwise("libdevice", libdevice_path(), [arg0, ],
                                   {(core.dtype("fp64"),): ("__nv_isfinited", core.dtype("int32")),
                                    }, is_pure=True, _builder=_builder)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,480
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/config/pegasus.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from ... import core as qc
class PreTrained(qc.PreTrained):
    """Pegasus configuration / pretrained base.

    Declares the default hyperparameters (``hs``) for a Pegasus
    encoder-decoder and provides weight-init and gradient-checkpointing
    hooks used by the model classes in qnarre.
    """

    # Default hyperparameters; individual checkpoints override these via MAP
    # (defined at module level below).
    hs = qc.Hypers(
        kw=dict(
            act_fun="gelu",
            d_dec_ffn=4096,  # decoder feed-forward width
            d_enc_ffn=4096,  # encoder feed-forward width
            d_model=1024,
            dec_START=0,  # decoder start token id
            drop_act=0.0,
            drop_attn=0.0,
            drop_dec=0.0,
            drop_enc=0.0,
            drop_proj=0.0,
            drop=0.1,
            EOS=1,
            forced_EOS=1,
            grad_checkpoint=True,
            init_std=0.02,
            is_enc_dec=True,
            model_type="pegasus",
            n_dec_heads=16,
            n_dec_lays=12,
            n_enc_heads=16,
            n_enc_lays=12,
            n_pos=1024,  # max positions
            PAD=0,
            s_vocab=50265,  # vocab size; checkpoints override (e.g. 96103)
            scale=False,
            y_cache=True,
        ),
    )

    def __init__(self, **kw):
        super().__init__(**kw)

    def _init_weights(self, module):
        """Initialize weights: normal(0, init_std) for linear/embedding
        layers, zeros for biases and the padding row of embeddings.

        NOTE(review): the SinusoidalPositionalEmbedding branch must stay
        before the qc.Embedding branch — presumably it subclasses Embedding
        and its fixed weights must not be re-initialized; confirm against
        its definition (resolved elsewhere in the package).
        """
        std = self.cfg.init_std
        if isinstance(module, qc.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, SinusoidalPositionalEmbedding):
            pass  # fixed sinusoidal weights are left untouched
        elif isinstance(module, qc.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # Padding embedding stays zero so PAD contributes nothing.
                module.weight.data[module.padding_idx].zero_()

    def _set_grad_checkpoint(self, module, value=False):
        # Toggle gradient checkpointing on encoder/decoder stacks only
        # (Decoder/Encoder are resolved elsewhere in the package).
        if isinstance(module, (Decoder, Encoder)):
            module.grad_checkpoint = value

    @property
    def n_heads(self):
        # Generic head count defaults to the encoder's
        # (n_enc_heads == n_dec_heads in the defaults above).
        return self.n_enc_heads
# Per-checkpoint hyperparameter overrides, keyed by the published model name.
# Values here take precedence over the defaults in PreTrained.hs; task_params
# carries per-summarization-dataset generation settings (length penalty,
# max output length, max positions).
MAP = {
    "google/pegasus-large": dict(
        act_fun="relu",
        add_bias_logits=False,
        add_final_norm=True,
        archs=["ForCondGen"],
        BOS=0,
        drop_act=0.1,
        drop_attn=0.1,
        extra_pos_embeddings=1,
        force_bos_token_to_be_generated=False,
        grad_checkpoint=False,
        id2label={"0": "LABEL_0", "1": "LABEL_1", "2": "LABEL_2"},
        label2id={"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2},
        len_penalty=0.8,
        max_len=256,
        n_beams=8,
        n_dec_lays=16,
        n_enc_lays=16,
        n_lays=16,
        name_or_path="google/pegasus-large",
        normalize_embedding=False,
        pre_norm=True,
        s_vocab=96103,
        scale=True,
        static_position_embeddings=True,
        # Fine-tuned summarization task presets.
        task_params=dict(
            sum_aeslc=dict(
                len_penalty=0.6,
                max_len=32,
                n_pos=512,
            ),
            sum_arxiv=dict(
                len_penalty=0.8,
                max_len=256,
                n_pos=1024,
            ),
            sum_big_patent=dict(
                len_penalty=0.7,
                max_len=256,
                n_pos=1024,
            ),
            sum_billsum=dict(
                len_penalty=0.6,
                max_len=256,
                n_pos=1024,
            ),
            sum_cnn_dailymail=dict(
                len_penalty=0.8,
                max_len=128,
                n_pos=1024,
            ),
            sum_gigaword=dict(
                len_penalty=0.6,
                max_len=32,
                n_pos=128,
            ),
            sum_large=dict(
                len_penalty=0.8,
                max_len=256,
                n_pos=1024,
            ),
            sum_multi_news=dict(
                len_penalty=0.8,
                max_len=256,
                n_pos=1024,
            ),
            sum_newsroom=dict(
                len_penalty=0.8,
                max_len=128,
                n_pos=512,
            ),
            sum_pubmed=dict(
                len_penalty=0.8,
                max_len=256,
                n_pos=1024,
            ),
            sum_reddit_tifu=dict(
                len_penalty=0.6,
                max_len=128,
                n_pos=512,
            ),
            sum_wikihow=dict(
                len_penalty=0.6,
                max_len=256,
                n_pos=512,
            ),
            sum_xsum=dict(
                len_penalty=0.8,
                max_len=64,
                n_pos=512,
            ),
        ),
    )
}
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,481
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/dataset/wiki_summary.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import csv
import datasets as ds
_ID = "id"
_LINK = "link"
_TITLE = "title"
_ARTICLE = "article"
_HIGHLIGHTS = "highlights"
_TRAIN = "https://drive.google.com/u/0/uc?id=1-CaP3xHgZxOGjQ3pXC5tr9YnIajmel-t&export=download"
_TEST = "https://drive.google.com/u/0/uc?id=1-9G4yYP6YO8oMA-o4cTe9NJpEyr7x5jg&export=download"
_VALID = "https://drive.google.com/u/0/uc?id=1-2g2gkDeNaN-vth-8Mgit_ovmSkVh91u&export=download"
class WikiSummary(ds.GeneratorBasedBuilder):
    """Dataset builder for a TSV-based summarization corpus.

    Each row has five tab-separated string fields: id, link, title,
    article, highlights.  Train/test/validation splits are downloaded
    from the Google Drive URLs declared at module level.
    """

    VERSION = ds.Version("1.1.0")

    def _info(self):
        # All five columns are plain strings.
        return ds.DatasetInfo(
            description="",
            citation="",
            homepage="",
            license="",
            features=ds.Features(
                {k: ds.Value("string") for k in [_ID, _LINK, _TITLE, _ARTICLE, _HIGHLIGHTS]}
            ),
        )

    def _split_generators(self, mgr):
        """Download each split and return its generator description.

        The dict passed via ``gen_kw`` is forwarded as keyword arguments
        to ``_generate_examples``, so its key must match that method's
        parameter name.  The previous key ``"filepath"`` did not match
        the parameter ``path`` and would raise a TypeError at
        generation time; fixed to ``"path"``.
        """
        train = mgr.download_and_extract(_TRAIN)
        test = mgr.download_and_extract(_TEST)
        valid = mgr.download_and_extract(_VALID)
        return [
            ds.SplitGenerator(name=ds.Split.TRAIN, gen_kw={"path": train}),
            ds.SplitGenerator(name=ds.Split.TEST, gen_kw={"path": test}),
            ds.SplitGenerator(name=ds.Split.VALIDATION, gen_kw={"path": valid}),
        ]

    def _generate_examples(self, path):
        """Yield ``(index, example)`` pairs from the TSV file at *path*.

        Rows that do not contain exactly five fields are silently
        skipped, so malformed lines never abort generation.
        """
        with open(path, encoding="utf8") as f:
            r = csv.reader(
                f,
                quotechar='"',
                delimiter="\t",
                quoting=csv.QUOTE_ALL,
                skipinitialspace=True,
            )
            for i, row in enumerate(r):
                if len(row) == 5:
                    yield i, {
                        _ID: row[0],
                        _LINK: row[1],
                        _TITLE: row[2],
                        _ARTICLE: row[3],
                        _HIGHLIGHTS: row[4],
                    }
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,482
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/run/beam.py
|
# Copyright 2021 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# fine-tune XLNet for question answering with beam search
import collections
import json
import logging
import numpy as np
import os
import torch
from tqdm.auto import tqdm
from transformers import (
XLNetConfig,
XLNetTokenizerFast,
XLNetForQuestionAnswering,
EvalPrediction,
)
from .params import EVAL, TEST, EACH
from .qa import Runner as Base
from .utils import init_array
# Module-level logger; its level is adjusted later by proc_preds via log_level.
log = logging.getLogger(__name__)
class Runner(Base):
    """XLNet question-answering runner with beam-search decoding.

    Extends the base QA runner with the XLNet-specific features the model
    consumes (``cls_index``, ``p_mask``, ``is_impossible``) and with
    evaluation / prediction loops that gather the five beam-search output
    tensors (start/end top log-probs and indices plus cls logits).
    """

    # Model family used by the base runner's lazy builders.
    AutoConfig = XLNetConfig
    AutoTokenizer = XLNetTokenizerFast
    AutoModel = XLNetForQuestionAnswering

    def prep_for_train(self, xs):
        """Tokenize a batch of training examples and build XLNet QA labels.

        For every tokenized feature this adds:
        - ``start_positions`` / ``end_positions``: token span of the answer
          (both point at the CLS token when there is no answer),
        - ``is_impossible``: 1.0 for unanswerable features, else 0.0,
        - ``cls_index``: position of the CLS token,
        - ``p_mask``: 1.0 for tokens that can never be part of an answer.
        """
        ps, pad_on_right = self.params, self.pad_on_right
        q_col, c_col, a_col = self.cols[EACH]
        # Leading whitespace in questions would otherwise waste truncation budget.
        xs[q_col] = [x.lstrip() for x in xs[q_col]]
        ys = self.tokenizer(
            xs[q_col if pad_on_right else c_col],
            xs[c_col if pad_on_right else q_col],
            truncation="only_second" if pad_on_right else "only_first",
            max_len=self.max_seq_length,
            stride=ps.doc_stride,
            return_overflowing_tokens=True,
            return_offsets_mapping=True,
            return_special_tokens_mask=True,
            return_token_type_ids=True,
            padding="max_len",
        )
        # One example may overflow into several features; sample_map takes a
        # feature index back to its originating example.
        sample_map = ys.pop("overflow_to_sample_mapping")
        specials = ys.pop("special_tokens_mask")
        ys["start_positions"] = []
        ys["end_positions"] = []
        ys["is_impossible"] = []
        ys["cls_index"] = []
        ys["p_mask"] = []
        for i, offs in enumerate(ys.pop("offset_mapping")):
            ins = ys["input_ids"][i]
            cls = ins.index(self.tokenizer.cls_token_id)
            ys["cls_index"].append(cls)
            # Mark special tokens with segment id 3 so they fall outside the
            # answerable region computed below.
            ids = ys["typ_ids"][i]
            for k, s in enumerate(specials[i]):
                if s:
                    ids[k] = 3
            por = 1 if pad_on_right else 0
            # p_mask: 0.0 where an answer may start/end (context tokens and
            # the CLS token), 1.0 everywhere else.
            ys["p_mask"].append(
                [
                    0.0 if (not specials[i][k] and s == por) or k == cls else 1.0
                    for k, s in enumerate(ids)
                ]
            )
            ans = xs[a_col][sample_map[i]]
            if len(ans["answer_start"]) == 0:
                # Unanswerable example: point both ends at CLS.
                ys["start_positions"].append(cls)
                ys["end_positions"].append(cls)
                ys["is_impossible"].append(1.0)
            else:
                s = ans["answer_start"][0]
                e = s + len(ans["text"][0])
                # j / k: first and last token of the context segment.
                j = 0
                while ids[j] != por:
                    j += 1
                k = len(ins) - 1
                while ids[k] != por:
                    k -= 1
                if not (offs[j][0] <= s and offs[k][1] >= e):
                    # The answer lies outside this feature's span.
                    ys["start_positions"].append(cls)
                    ys["end_positions"].append(cls)
                    ys["is_impossible"].append(1.0)
                else:
                    # Walk inward to the exact token boundaries of the answer.
                    while j < len(offs) and offs[j][0] <= s:
                        j += 1
                    ys["start_positions"].append(j - 1)
                    while offs[k][1] >= e:
                        k -= 1
                    ys["end_positions"].append(k + 1)
                    ys["is_impossible"].append(0.0)
        return ys

    def prep_for_eval(self, xs):
        """Tokenize evaluation examples and attach inference-time features.

        Adds ``example_id``, ``cls_index`` and ``p_mask`` per feature, and
        nulls out offsets that are not part of the context segment so the
        post-processor can skip them.
        """
        ps, pad_on_right = self.params, self.pad_on_right
        q_col, c_col, _ = self.cols[EACH]
        xs[q_col] = [q.lstrip() for q in xs[q_col]]
        ys = self.tokenizer(
            xs[q_col if pad_on_right else c_col],
            xs[c_col if pad_on_right else q_col],
            truncation="only_second" if pad_on_right else "only_first",
            max_len=self.max_seq_length,
            stride=ps.doc_stride,
            return_overflowing_tokens=True,
            return_offsets_mapping=True,
            return_special_tokens_mask=True,
            return_token_type_ids=True,
            padding="max_len",
        )
        sample_map = ys.pop("overflow_to_sample_mapping")
        specials = ys.pop("special_tokens_mask")
        ys["example_id"] = []
        ys["cls_index"] = []
        ys["p_mask"] = []
        for i, ins in enumerate(ys["input_ids"]):
            cls = ins.index(self.tokenizer.cls_token_id)
            ys["cls_index"].append(cls)
            # Same segment-id / p_mask construction as prep_for_train.
            ids = ys["typ_ids"][i]
            for k, s in enumerate(specials[i]):
                if s:
                    ids[k] = 3
            por = 1 if pad_on_right else 0
            ys["p_mask"].append(
                [
                    0.0 if (not specials[i][k] and s == por) or k == cls else 1.0
                    for k, s in enumerate(ids)
                ]
            )
            ys["example_id"].append(xs["id"][sample_map[i]])
            # Keep only context offsets; non-context positions become None.
            ys["offset_mapping"][i] = [
                (o if ids[k] == por else None) for k, o in enumerate(ys["offset_mapping"][i])
            ]
        return ys

    def eval(self):
        """Run the evaluation loop over the EVAL split and log the metrics."""
        # BUG FIX: the original read `ps, mgr, ds = self.params, self.mgr`,
        # unpacking a 2-tuple into three names -> ValueError at runtime
        # (`ds` was immediately reassigned on the next line anyway).
        ps, mgr = self.params, self.mgr
        ds, src = self.eval_ds, self.loaders[EVAL]
        log.info("*** Evaluating ***")
        log.info(f" Num samples = {len(ds)}")
        log.info(f" Batch size per device = {ps.eval_batch_size}")
        all_start_top_log_probs = []
        all_start_top_index = []
        all_end_top_log_probs = []
        all_end_top_index = []
        all_cls_logits = []
        for xs in src:
            with torch.no_grad():
                ys = self.model(**xs)
                start_top_log_probs = ys.start_top_log_probs
                start_top_index = ys.start_top_index
                end_top_log_probs = ys.end_top_log_probs
                end_top_index = ys.end_top_index
                cls_logits = ys.cls_logits
                if not ps.pad_to_max_length:
                    # Pad to a common width across processes before gather.
                    start_top_log_probs = mgr.pad_across_processes(
                        start_top_log_probs, dim=1, PAD=-100
                    )
                    start_top_index = mgr.pad_across_processes(start_top_index, dim=1, PAD=-100)
                    end_top_log_probs = mgr.pad_across_processes(end_top_log_probs, dim=1, PAD=-100)
                    end_top_index = mgr.pad_across_processes(end_top_index, dim=1, PAD=-100)
                    cls_logits = mgr.pad_across_processes(cls_logits, dim=1, PAD=-100)
                all_start_top_log_probs.append(mgr.gather(start_top_log_probs).cpu().numpy())
                all_start_top_index.append(mgr.gather(start_top_index).cpu().numpy())
                all_end_top_log_probs.append(mgr.gather(end_top_log_probs).cpu().numpy())
                all_end_top_index.append(mgr.gather(end_top_index).cpu().numpy())
                all_cls_logits.append(mgr.gather(cls_logits).cpu().numpy())
        # Widest beam dimension across batches; init_array pads shorter
        # batches up to it.
        l = max(x.shape[1] for x in all_end_top_log_probs)
        start_top_log_probs_concat = init_array(all_start_top_log_probs, ds, l)
        start_top_index_concat = init_array(all_start_top_index, ds, l)
        end_top_log_probs_concat = init_array(all_end_top_log_probs, ds, l)
        end_top_index_concat = init_array(all_end_top_index, ds, l)
        cls_logits_concat = np.concatenate(all_cls_logits, axis=0)
        # Release the last batch's tensors before post-processing.
        del start_top_log_probs
        del start_top_index
        del end_top_log_probs
        del end_top_index
        del cls_logits
        outputs_numpy = (
            start_top_log_probs_concat,
            start_top_index_concat,
            end_top_log_probs_concat,
            end_top_index_concat,
            cls_logits_concat,
        )
        y = self.post_proc(self.evals, ds, outputs_numpy)
        y = self.metric.compute(predictions=y.predictions, references=y.label_ids)
        log.info(f"Evaluation metrics: {y}")

    def post_proc(self, xs, features, preds, stage="eval"):
        """Turn raw beam-search outputs into an EvalPrediction.

        xs: the raw (un-tokenized) examples; features: their tokenized
        features; preds: the five-array tuple produced by eval()/pred().
        """
        ps = self.params
        ys, diff = proc_preds(
            examples=xs,
            features=features,
            predictions=preds,
            version_2_with_negative=ps.version_2_with_negative,
            n_best_size=ps.n_best_size,
            max_answer_length=ps.max_answer_length,
            start_n_top=self.model.config.start_n_top,
            end_n_top=self.model.config.end_n_top,
            out_dir=ps.out_dir,
            prefix=stage,
        )
        if ps.version_2_with_negative:
            # SQuAD-v2 style: include the no-answer probability per example.
            ys = [
                {"id": k, "prediction_text": v, "no_answer_probability": diff[k]}
                for k, v in ys.items()
            ]
        else:
            ys = [{"id": k, "prediction_text": v} for k, v in ys.items()]
        ids = [{"id": x["id"], "answers": x[self.cols[EACH][2]]} for x in xs]
        return EvalPrediction(predictions=ys, label_ids=ids)

    def pred(self):
        """Run the prediction loop over the TEST split (when enabled) and
        log the metrics. Mirrors eval() but is gated on ``ps.do_test``."""
        ps, mgr = self.params, self.mgr
        if ps.do_test:
            ds, src = self.test_ds, self.loaders[TEST]
            log.info("*** Prediction ***")
            log.info(f" Num samples = {len(ds)}")
            log.info(f" Batch size per device = {ps.eval_batch_size}")
            all_start_top_log_probs = []
            all_start_top_index = []
            all_end_top_log_probs = []
            all_end_top_index = []
            all_cls_logits = []
            for xs in src:
                with torch.no_grad():
                    ys = self.model(**xs)
                    start_top_log_probs = ys.start_top_log_probs
                    start_top_index = ys.start_top_index
                    end_top_log_probs = ys.end_top_log_probs
                    end_top_index = ys.end_top_index
                    cls_logits = ys.cls_logits
                    if not ps.pad_to_max_length:
                        # Pad to a common width across processes before gather.
                        start_top_log_probs = mgr.pad_across_processes(
                            start_top_log_probs, dim=1, PAD=-100
                        )
                        start_top_index = mgr.pad_across_processes(start_top_index, dim=1, PAD=-100)
                        end_top_log_probs = mgr.pad_across_processes(
                            end_top_log_probs, dim=1, PAD=-100
                        )
                        end_top_index = mgr.pad_across_processes(end_top_index, dim=1, PAD=-100)
                        cls_logits = mgr.pad_across_processes(cls_logits, dim=1, PAD=-100)
                    all_start_top_log_probs.append(mgr.gather(start_top_log_probs).cpu().numpy())
                    all_start_top_index.append(mgr.gather(start_top_index).cpu().numpy())
                    all_end_top_log_probs.append(mgr.gather(end_top_log_probs).cpu().numpy())
                    all_end_top_index.append(mgr.gather(end_top_index).cpu().numpy())
                    all_cls_logits.append(mgr.gather(cls_logits).cpu().numpy())
            l = max(x.shape[1] for x in all_end_top_log_probs)
            start_top_log_probs_concat = init_array(all_start_top_log_probs, ds, l)
            start_top_index_concat = init_array(all_start_top_index, ds, l)
            end_top_log_probs_concat = init_array(all_end_top_log_probs, ds, l)
            end_top_index_concat = init_array(all_end_top_index, ds, l)
            cls_logits_concat = np.concatenate(all_cls_logits, axis=0)
            # Release the last batch's tensors before post-processing.
            del start_top_log_probs
            del start_top_index
            del end_top_log_probs
            del end_top_index
            del cls_logits
            outputs_numpy = (
                start_top_log_probs_concat,
                start_top_index_concat,
                end_top_log_probs_concat,
                end_top_index_concat,
                cls_logits_concat,
            )
            y = self.post_proc(self.preds, ds, outputs_numpy)
            y = self.metric.compute(predictions=y.predictions, references=y.label_ids)
            log.info(f"Prediction metrics: {y}")
def proc_preds(
    examples,
    features,
    predictions,
    version_2_with_negative=False,
    n_best_size=20,
    max_answer_length=30,
    start_n_top=5,
    end_n_top=5,
    out_dir=None,
    prefix=None,
    log_level=logging.WARNING,
):
    """Post-process XLNet beam-search QA outputs into text predictions.

    `predictions` must be a 5-tuple of per-feature arrays:
    (start_top_log_probs, start_top_index, end_top_log_probs,
    end_top_index, cls_logits). Returns (all_predictions,
    scores_diff_json) where the second item is None unless
    version_2_with_negative. Optionally dumps predictions, nbest lists
    and (v2) null odds as JSON into out_dir.
    """
    if len(predictions) != 5:
        raise ValueError("`predictions` should be a tuple with five elements.")
    start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = predictions
    if len(predictions[0]) != len(features):
        raise ValueError(f"Got {len(predictions[0])} predictions and {len(features)} features.")
    # Map each example id to the list of feature indices it overflowed into.
    example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
    features_per_example = collections.defaultdict(list)
    for i, feature in enumerate(features):
        features_per_example[example_id_to_index[feature["example_id"]]].append(i)
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict() if version_2_with_negative else None
    log.setLevel(log_level)
    log.info(
        f"Post-processing {len(examples)} example predictions split into {len(features)} features."
    )
    for example_index, example in enumerate(tqdm(examples)):
        feature_indices = features_per_example[example_index]
        # Minimum cls logit across the example's features = null-answer score.
        min_null_score = None
        prelim_predictions = []
        for feature_index in feature_indices:
            start_log_prob = start_top_log_probs[feature_index]
            start_indexes = start_top_index[feature_index]
            end_log_prob = end_top_log_probs[feature_index]
            end_indexes = end_top_index[feature_index]
            feature_null_score = cls_logits[feature_index]
            # offset_mapping entries are None for non-context tokens
            # (see prep_for_eval), which disqualifies those positions below.
            offset_mapping = features[feature_index]["offset_mapping"]
            token_is_max_context = features[feature_index].get("token_is_max_context", None)
            if min_null_score is None or feature_null_score < min_null_score:
                min_null_score = feature_null_score
            # Enumerate every (start, end) beam pair; end candidates are laid
            # out start-major, hence the i * end_n_top + j indexing.
            for i in range(start_n_top):
                for j in range(end_n_top):
                    start_index = int(start_indexes[i])
                    j_index = i * end_n_top + j
                    end_index = int(end_indexes[j_index])
                    # Skip spans that fall outside the context.
                    if (
                        start_index >= len(offset_mapping)
                        or end_index >= len(offset_mapping)
                        or offset_mapping[start_index] is None
                        or offset_mapping[end_index] is None
                    ):
                        continue
                    # Skip inverted or over-long spans.
                    if end_index < start_index or end_index - start_index + 1 > max_answer_length:
                        continue
                    # Skip starts where this feature is not the max context.
                    if token_is_max_context is not None and not token_is_max_context.get(
                        str(start_index), False
                    ):
                        continue
                    prelim_predictions.append(
                        {
                            "offsets": (
                                offset_mapping[start_index][0],
                                offset_mapping[end_index][1],
                            ),
                            "score": start_log_prob[i] + end_log_prob[j_index],
                            "start_log_prob": start_log_prob[i],
                            "end_log_prob": end_log_prob[j_index],
                        }
                    )
        # Keep the n_best_size highest-scoring candidate spans.
        predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[
            :n_best_size
        ]
        context = example["context"]
        for pred in predictions:
            offsets = pred.pop("offsets")
            pred["text"] = context[offsets[0] : offsets[1]]
        if len(predictions) == 0:
            # Fallback dummy answer when every candidate was filtered out.
            # NOTE(review): uses "start_logit"/"end_logit" keys while real
            # candidates carry "start_log_prob"/"end_log_prob" — harmless here
            # since only "text" and the popped "score" are consumed, but the
            # nbest JSON entry for this case has different keys; confirm intent.
            predictions.insert(
                0, {"text": "", "start_logit": -1e-6, "end_logit": -1e-6, "score": -2e-6}
            )
        # Softmax over candidate scores (max-subtracted for stability).
        scores = np.array([pred.pop("score") for pred in predictions])
        exp_scores = np.exp(scores - np.max(scores))
        probs = exp_scores / exp_scores.sum()
        for prob, pred in zip(probs, predictions):
            pred["probability"] = prob
        all_predictions[example["id"]] = predictions[0]["text"]
        if version_2_with_negative:
            # NOTE(review): min_null_score stays None if an example has no
            # features, which would make float() raise here — presumably
            # every example has at least one feature; verify upstream.
            scores_diff_json[example["id"]] = float(min_null_score)
        # JSON-serializable nbest entries (numpy floats -> Python floats).
        all_nbest_json[example["id"]] = [
            {
                k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v)
                for k, v in pred.items()
            }
            for pred in predictions
        ]
    if out_dir is not None:
        if not os.path.isdir(out_dir):
            raise EnvironmentError(f"{out_dir} is not a directory.")
        prediction_file = os.path.join(
            out_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json"
        )
        nbest_file = os.path.join(
            out_dir,
            "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json",
        )
        if version_2_with_negative:
            null_odds_file = os.path.join(
                out_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json"
            )
        log.info(f"Saving predictions to {prediction_file}.")
        with open(prediction_file, "w") as writer:
            writer.write(json.dumps(all_predictions, indent=4) + "\n")
        log.info(f"Saving nbest_preds to {nbest_file}.")
        with open(nbest_file, "w") as writer:
            writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
        if version_2_with_negative:
            log.info(f"Saving null_odds to {null_odds_file}.")
            with open(null_odds_file, "w") as writer:
                writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
    return all_predictions, scores_diff_json
def main():
    """Drive the full QA pipeline: build components, then run every stage."""
    runner = Runner()
    # Touch the lazily-built attributes in dependency order before running.
    for attr in ("dataset", "cols", "config", "tokenizer", "model", "loaders"):
        getattr(runner, attr)
    # Execute the pipeline stages in order.
    for stage in (runner.prepare, runner.train, runner.eval, runner.pred, runner.save):
        stage()


if __name__ == "__main__":
    main()
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,483
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/triton/compiler/compiler.py
|
from __future__ import annotations
import functools
import hashlib
import json
import os
import re
import subprocess
import tempfile
from collections import namedtuple
from pathlib import Path
from typing import Any, Tuple
import triton
import triton._C.libtriton.triton as _triton
from ..runtime import driver
# TODO: runtime.errors
from ..runtime.autotuner import OutOfResources
from ..runtime.cache import get_cache_manager
from ..tools.disasm import extract
from .code_generator import ast_to_ttir
from .make_launcher import make_stub
def inline_triton_ir(mod):
    """Run the MLIR inliner pass over *mod* in place and return it."""
    manager = _triton.ir.pass_manager(mod.context)
    manager.enable_debug()
    manager.add_inliner_pass()
    manager.run(mod)
    return mod
def ttir_compute_capability_rewrite(mod, arch):
    """Rewrite block-(tensor-)pointer loads/stores into tensors of plain
    pointers on hardware without native support; returns *mod* in place."""
    manager = _triton.ir.pass_manager(mod.context)
    manager.enable_debug()
    # The rewrite is only registered for CUDA targets (integer arch).
    if _is_cuda(arch):
        manager.add_rewrite_tensor_pointer_pass(arch)
    manager.run(mod)
    return mod
def optimize_ttir(mod, arch):
    """Apply the standard Triton-IR optimization pipeline to *mod*."""
    mod = ttir_compute_capability_rewrite(inline_triton_ir(mod), arch)
    manager = _triton.ir.pass_manager(mod.context)
    manager.enable_debug()
    # Pass order matters: inline, combine, canonicalize, then CSE/LICM
    # cleanup and dead-symbol elimination.
    for add_pass in (
        manager.add_inliner_pass,
        manager.add_triton_combine_pass,
        manager.add_canonicalizer_pass,
        manager.add_cse_pass,
        manager.add_licm_pass,
        manager.add_symbol_dce_pass,
    ):
        add_pass()
    manager.run(mod)
    return mod
def ttir_to_ttgir(mod, num_warps):
    """Lower Triton IR to the TritonGPU dialect for *num_warps* warps."""
    manager = _triton.ir.pass_manager(mod.context)
    manager.add_convert_triton_to_tritongpu_pass(num_warps)
    manager.run(mod)
    return mod
def optimize_ttgir(mod, num_stages, arch):
    """Run the TritonGPU optimization pipeline (coalescing, pipelining,
    prefetching, layout-conversion cleanup) on *mod* and return it."""
    manager = _triton.ir.pass_manager(mod.context)
    manager.enable_debug()
    pipeline = [
        manager.add_tritongpu_coalesce_pass,
        manager.add_tritongpu_remove_layout_conversions_pass,
    ]
    # Matmul acceleration applies only to CUDA targets, where *arch* is an
    # integer compute capability.
    if isinstance(arch, int):
        pipeline.append(lambda: manager.add_tritongpu_accelerate_matmul_pass(arch))
    pipeline += [
        manager.add_tritongpu_remove_layout_conversions_pass,
        manager.add_tritongpu_optimize_dot_operands_pass,
        lambda: manager.add_tritongpu_pipeline_pass(num_stages),
        manager.add_tritongpu_prefetch_pass,
        manager.add_tritongpu_optimize_dot_operands_pass,
        manager.add_tritongpu_remove_layout_conversions_pass,
        manager.add_tritongpu_decompose_conversions_pass,
        manager.add_tritongpu_reorder_instructions_pass,
        manager.add_cse_pass,
        manager.add_symbol_dce_pass,
    ]
    # Registration order is the execution order; keep it exactly as above.
    for add_pass in pipeline:
        add_pass()
    manager.run(mod)
    return mod
def _add_external_libs(mod, libs):
for name, path in libs.items():
if len(name) == 0 or len(path) == 0:
return
_triton.add_external_libs(mod, list(libs.keys()), list(libs.values()))
def ttgir_to_llir(mod, extern_libs, arch):
    """Translate a TritonGPU module to LLVM IR, linking *extern_libs* first."""
    if extern_libs:
        _add_external_libs(mod, extern_libs)
    # TODO: separate tritongpu_to_llvmir for different backends
    if _is_cuda(arch):
        # CUDA: pass the integer compute capability, is_rocm=False.
        return _triton.translate_triton_gpu_to_llvmir(mod, arch, False)
    # ROCm: the capability argument is unused (0), is_rocm=True.
    return _triton.translate_triton_gpu_to_llvmir(mod, 0, True)
# PTX translation
@functools.lru_cache()
def ptx_get_version(cuda_version) -> int:
    '''
    Get the highest PTX version supported by the current CUDA driver.

    *cuda_version* is a "major.minor" string as reported by ptxas.
    '''
    assert isinstance(cuda_version, str)
    major, minor = (int(part) for part in cuda_version.split('.'))
    # Each supported CUDA major release maps onto a contiguous band of
    # PTX ISA versions offset by the minor release number.
    base = {12: 80, 11: 70, 10: 63}.get(major)
    if base is not None:
        return base + minor
    raise RuntimeError("Triton only support CUDA 10.0 or higher")
@functools.lru_cache()
def path_to_ptxas():
    """Locate a working ``ptxas`` binary.

    Checks $TRITON_PTXAS_PATH first, then the copy bundled under
    third_party/cuda/bin. Returns a ``(path, version_string)`` tuple;
    raises RuntimeError when no usable binary is found.
    """
    base_dir = os.path.join(os.path.dirname(__file__), os.pardir)
    candidates = (
        os.environ.get("TRITON_PTXAS_PATH", ""),
        os.path.join(base_dir, "third_party", "cuda", "bin", "ptxas"),
    )
    for candidate in candidates:
        if not (os.path.exists(candidate) and os.path.isfile(candidate)):
            continue
        output = subprocess.check_output([candidate, "--version"], stderr=subprocess.STDOUT)
        if output is None:
            continue
        match = re.search(r".*release (\d+\.\d+).*", output.decode("utf-8"), flags=re.MULTILINE)
        if match is not None:
            return candidate, match.group(1)
    raise RuntimeError("Cannot find ptxas")
def llir_to_ptx(mod: Any, arch: int, ptx_version: int = None) -> str:
    '''
    Translate an LLVM-IR module (lowered from TritonGPU) to PTX code.
    :param mod: the module to translate
    :param arch: target compute capability
    :param ptx_version: PTX ISA version; autodetected from ptxas when None
    :return: PTX code
    '''
    if ptx_version is None:
        # Ask the bundled ptxas which toolkit (and hence PTX ISA) it ships.
        _, cuda_version = path_to_ptxas()
        ptx_version = ptx_get_version(cuda_version)
    return _triton.translate_llvmir_to_ptx(mod, arch, ptx_version)
def ptx_to_cubin(ptx: str, arch: int):
    '''
    Assemble PTX into a cubin via ptxas.
    :param ptx: ptx code
    :param arch: target compute capability
    :return: str
    '''
    assembler, _ = path_to_ptxas()
    return _triton.compile_ptx_to_cubin(ptx, assembler, arch)
# AMDGCN translation
def get_amdgcn_bitcode_paths(arch):
gpu_arch_agnostic_bitcode_libraries = ["opencl.bc",
"ocml.bc",
"ockl.bc",
"oclc_finite_only_off.bc",
"oclc_daz_opt_off.bc",
"oclc_correctly_rounded_sqrt_on.bc",
"oclc_unsafe_math_off.bc",
"oclc_wavefrontsize64_on.bc"]
gfx_arch = arch[1]
gfx_arch_id = re.search('gfx(\\w+)', gfx_arch).group(1).strip()
gpu_arch_specific_bitcode_library = 'oclc_isa_version_' + gfx_arch_id + ".bc"
bitcode_path_dir = os.path.join(Path(__file__).parent.resolve(), "third_party/rocm/lib/bitcode/")
amdgcn_bitcode_paths = {}
i = 1
for bc_lib in gpu_arch_agnostic_bitcode_libraries:
bc_path = bitcode_path_dir + bc_lib
if os.path.exists(bc_path):
amdgcn_bitcode_paths['library_' + str(i)] = bc_path
i += 1
bc_gfx_path = bitcode_path_dir + gpu_arch_specific_bitcode_library
if os.path.exists(bc_gfx_path):
amdgcn_bitcode_paths['library_' + str(i)] = bc_gfx_path
return amdgcn_bitcode_paths
def get_amdgpu_arch_fulldetails():
    """
    Get the amdgpu full ISA details for compiling:
    i.e., arch_triple: amdgcn-amd-amdhsa; arch_name: gfx906; arch_features: sramecc+:xnack-

    Returns [triple, name, features] or None when rocminfo is unavailable
    or its output cannot be parsed.
    """
    try:
        # TODO: package rocm.cc with Triton
        rocm_dir = os.getenv("ROCM_PATH", default="/opt/rocm")
        rocminfo_output = subprocess.check_output(rocm_dir + '/bin/rocminfo').decode()
        details = re.search('amd.*', rocminfo_output).group(0).strip().split('--')
        triple = details[0]
        name_and_features = details[1].split(':')
        features = ""
        if len(name_and_features) == 3:
            # e.g. "sramecc+" / "xnack-" -> "+sramecc,-xnack"
            features = ("+" + re.search('\\w+', name_and_features[1]).group(0)
                        + ",-" + re.search('\\w+', name_and_features[2]).group(0))
        return [triple, name_and_features[0], features]
    except BaseException:
        # Any failure (missing rocminfo, parse error) means "no AMD GPU here".
        return None
def llir_to_amdgcn_and_hsaco(mod: Any, gfx_arch: str, gfx_triple: str, gfx_features: str) -> Tuple[str, str]:
    '''
    Translate TritonGPU module to HSACO code based on full details of gpu architecture.
    :param mod: a TritonGPU dialect module
    :param gfx_arch: gpu name, e.g. "gfx906"
    :param gfx_triple: target triple, e.g. "amdgcn-amd-amdhsa"
    :param gfx_features: ISA feature string, e.g. "+sramecc,-xnack"
    :return:
        - AMDGCN code
        - Path to HSACO object
    '''
    return _triton.translate_llvmir_to_hsaco(mod, gfx_arch, gfx_triple, gfx_features)
# ------------------------------------------------------------------------------
# compiler
# ------------------------------------------------------------------------------
def get_kernel_name(src: str, pattern: str) -> str:
    '''
    Get kernel name from PTX code.
    This Kernel name is required when launching the kernel.

    PTX codegen mangles names, so the original Triton IR kernel names are
    not recoverable from PTX/cubin; instead we take the last token of the
    first line starting with *pattern*. Returns None when no line matches.
    '''
    assert src
    for raw_line in src.split('\n'):
        stripped = raw_line.strip()
        if stripped.startswith(pattern):
            return stripped.split()[-1]
def convert_type_repr(x):
    """Rewrite a Triton type string: each !tt.ptr<T> wrapper becomes '*'.

    e.g. "!tt.ptr<f32>" -> "*f32"; non-pointer types pass through unchanged.
    """
    inner = re.search(r'!tt\.ptr<(.*)>', x)
    return x if inner is None else '*' + convert_type_repr(inner.group(1))
def make_hash(fn, arch, **kwargs):
    """Compute the cache key (md5 hex digest) for a compilation request.

    JIT functions hash their source cache key plus every compile option;
    plain IR file paths hash the file contents plus the runtime version key.
    """
    if not isinstance(fn, triton.runtime.JITFunction):
        assert isinstance(fn, str)
        text = Path(fn).read_text() + triton.runtime.jit.version_key()
        return hashlib.md5(text.encode("utf-8")).hexdigest()
    configs = kwargs["configs"]
    signature = kwargs["signature"]
    constants = kwargs.get("constants", dict())
    num_warps = kwargs.get("num_warps", 4)
    num_stages = kwargs.get("num_stages", 3)
    debug = kwargs.get("debug", False)
    # Specialization configs participate in the key: divisibility/equality
    # assumptions change the generated code.
    configs_key = [(sorted(c.divisible_by_16), sorted(c.equal_to_1)) for c in configs]
    key = f"{fn.cache_key}-{''.join(signature.values())}-{configs_key}-{constants}-{num_warps}-{num_stages}-{debug}-{arch}"
    return hashlib.md5(key.encode("utf-8")).hexdigest()
# Regexes used to recover a kernel's name and argument types when compiling
# from an IR file on disk (rather than from a JITFunction).
#
# mlir_prototype_pattern matches a tt.func prototype line:
# - ^\s*tt\.func\s+ : match the start of the string, any leading whitespace, the keyword func,
#   and any following whitespace
# - (public\s+)? : optionally match the keyword public and any following whitespace
# - (@\w+) : match an @ symbol followed by one or more word characters
#   (letters, digits, or underscores), and capture it as group 1 (the function name)
# - (\((?:%\w+: \S+(?: \{\S+ = \S+ : \S+\})?(?:, )?)*\)) : match a pair of parentheses enclosing
#   zero or more arguments separated by commas, and capture it as group 2 (the argument list)
mlir_prototype_pattern = r'^\s*tt\.func\s+(?:public\s+)?(@\w+)(\((?:%\w+: \S+(?: \{\S+ = \S+ : \S+\})?(?:, )?)*\))\s*\{\s*$'
# PTX prototypes look like ".visible .entry name(...)" or ".extern .func name(...)".
ptx_prototype_pattern = r"\.(?:visible|extern)\s+\.(?:entry|func)\s+(\w+)\s*\(([^)]*)\)"
# Prototype pattern per source-IR extension (ttir and ttgir share MLIR syntax).
prototype_pattern = {
    "ttir": mlir_prototype_pattern,
    "ttgir": mlir_prototype_pattern,
    "ptx": ptx_prototype_pattern,
}
# Extract each argument's type from the captured argument list (group 2 above).
mlir_arg_type_pattern = r'%\w+: ([^,^\)\s]+)(?: \{\S+ = \S+ : \S+\})?,?'
ptx_arg_type_pattern = r"\.param\s+\.(\w+)"
arg_type_pattern = {
    "ttir": mlir_arg_type_pattern,
    "ttgir": mlir_arg_type_pattern,
    "ptx": ptx_arg_type_pattern,
}
# Pulls the "triton_gpu.num-warps" attribute value out of a ttgir module.
ttgir_num_warps_pattern = r'"triton_gpu.num-warps"\s?=\s?(\d+)\s?:'
def _get_jsonable_constants(constants):
def _is_jsonable(x):
try:
json.dumps(x)
return True
except (TypeError, OverflowError):
return False
serialized_constants = {}
for constant in constants:
if _is_jsonable(constants[constant]):
serialized_constants[constant] = constants[constant]
return serialized_constants
def parse_mlir_module(path, context):
    """Parse the MLIR file at *path* into a module bound to *context*."""
    module = _triton.ir.parse_mlir_module(path, context)
    # module takes ownership of the context
    module.context = context
    return module
# Per-argument specialization info for a compiled kernel: which argument
# indices are known to be divisible by 16 and which are known to equal 1.
# NOTE(review): the defaults are two shared mutable set() instances — safe
# only as long as no caller mutates them in place; consider frozenset.
instance_descriptor = namedtuple("instance_descriptor", ["divisible_by_16", "equal_to_1"], defaults=[set(), set()])
# TODO: architecture descriptor class
def _is_cuda(arch):
return isinstance(arch, int)
def get_architecture_descriptor(capability):
    """Resolve the target architecture descriptor.

    Returns *capability* unchanged when given; otherwise probes the current
    device: an integer compute capability on CUDA, or the full ROCm arch
    details list on HIP builds of PyTorch.
    """
    try:
        import torch
    except ImportError:
        raise ImportError("Triton requires PyTorch to be installed")
    if capability is not None:
        return capability
    if torch.version.hip is None:
        device = triton.runtime.jit.get_current_device()
        cap = triton.runtime.jit.get_device_capability(device)
        return cap[0] * 10 + cap[1]
    return get_amdgpu_arch_fulldetails()
def add_rocm_stages(arch, extern_libs, stages):
    """Append the ROCm backend stage (llir -> amdgcn/hsaco) to *stages*.

    Also folds the ROCm device bitcode libraries into *extern_libs*,
    dropping any entries whose path is empty or None.
    """
    extern_libs.update(get_amdgcn_bitcode_paths(arch))
    for name in [k for k, v in extern_libs.items() if v == '' or v is None]:
        extern_libs.pop(name)
    # arch is [triple, name, features]; MI_GPU_ARCH can override the name.
    gfx_arch = os.environ.get('MI_GPU_ARCH', arch[1])
    if gfx_arch is None:
        raise RuntimeError('gfx_arch is None (not specified)')
    stages["amdgcn"] = (
        lambda path: Path(path).read_text(),
        lambda src: llir_to_amdgcn_and_hsaco(src, gfx_arch, arch[0], arch[2]),
    )
def add_cuda_stages(arch, extern_libs, stages):
    """Append the CUDA backend stages (llir -> ptx -> cubin) to *stages*.

    Each stage is a (parse-from-cache-file, compile-from-previous) pair.
    *extern_libs* is unused here; the parameter keeps the signature
    symmetric with add_rocm_stages.
    """
    stages["ptx"] = (
        lambda path: Path(path).read_text(),
        lambda src: llir_to_ptx(src, arch),
    )
    stages["cubin"] = (
        lambda path: Path(path).read_bytes(),
        lambda src: ptx_to_cubin(src, arch),
    )
def compile(fn, **kwargs):
    """Compile *fn* — a JITFunction or a path to a .ttir/.ttgir/.ptx file —
    through the staged pipeline (ast -> ttir -> ttgir -> llir ->
    ptx/cubin on CUDA, or -> amdgcn/hsaco on ROCm), caching every
    intermediate artifact in the filesystem cache.

    Recognized kwargs: cc, constants, num_warps, num_stages, extern_libs,
    debug, configs, signature, and shared (required when compiling from ptx).
    Returns a CompiledKernel handle wrapping the launcher stub + metadata.
    """
    arch = get_architecture_descriptor(kwargs.get("cc", None))
    is_cuda = _is_cuda(arch)
    context = _triton.ir.context()
    constants = kwargs.get("constants", dict())
    num_warps = kwargs.get("num_warps", 4)
    # Software pipelining needs >= sm_75 on CUDA; older parts get 2 stages.
    num_stages = kwargs.get("num_stages", 3 if is_cuda and arch >= 75 else 2)
    extern_libs = kwargs.get("extern_libs", dict())
    if extern_libs is None:
        extern_libs = dict()
    debug = kwargs.get("debug", False)
    # Build compilation stages: ir name -> (parse-from-cache-file,
    # compile-from-previous-stage). `signature` and `configs` inside the
    # ttir lambda are late-bound closures; they are assigned below before
    # any stage actually runs.
    stages = dict()
    stages["ast"] = (lambda path: fn, None)
    stages["ttir"] = (lambda path: parse_mlir_module(path, context),
                      lambda src: optimize_ttir(ast_to_ttir(src, signature, configs[0], constants, debug=debug), arch))
    stages["ttgir"] = (lambda path: parse_mlir_module(path, context),
                       lambda src: optimize_ttgir(ttir_to_ttgir(src, num_warps), num_stages, arch))
    stages["llir"] = (lambda path: Path(path).read_text(),
                      lambda src: ttgir_to_llir(src, extern_libs, arch))
    if is_cuda:
        add_cuda_stages(arch, extern_libs, stages)
    else:
        add_rocm_stages(arch, extern_libs, stages)
    # Find out the signature of the function.
    if isinstance(fn, triton.runtime.JITFunction):
        configs = kwargs.get("configs", None)
        signature = kwargs["signature"]
        if configs is None:
            configs = [instance_descriptor()]
        assert len(configs) == 1
        kwargs["configs"] = configs
        name = fn.__name__
        first_stage = 0
        if isinstance(signature, str):
            signature = {k: v.strip() for k, v in enumerate(signature.split(","))}
        kwargs["signature"] = signature
    else:
        assert isinstance(fn, str)
        # Compiling from an IR file: recover name and argument types from
        # the prototype line using the module-level patterns.
        _, ir = os.path.basename(fn).split(".")
        src = Path(fn).read_text()
        match = re.search(prototype_pattern[ir], src, re.MULTILINE)
        name, signature = match.group(1), match.group(2)
        types = re.findall(arg_type_pattern[ir], signature)
        if ir == 'ttgir':
            num_warps_matches = re.findall(ttgir_num_warps_pattern, src)
            assert len(num_warps_matches) == 1, "Expected exactly one match for num_warps"
            assert "num_warps" not in kwargs or int(num_warps_matches[0]) == num_warps, "num_warps in ttgir does not match num_warps in compile"
            num_warps = int(num_warps_matches[0])
        param_tys = [convert_type_repr(ty) for ty in types]
        signature = {k: v for k, v in enumerate(param_tys)}
        first_stage = list(stages.keys()).index(ir)
    # Compiled C launcher stub used to launch the kernel from Python.
    so_path = make_stub(name, signature, constants)
    # Cache manager keyed by everything that affects code generation.
    fn_cache_manager = get_cache_manager(make_hash(fn, arch, **kwargs))
    # Determine name and extension type of provided function.
    if isinstance(fn, triton.runtime.JITFunction):
        name, ext = fn.__name__, "ast"
    else:
        name, ext = os.path.basename(fn).split(".")
    # Load metadata if any.
    metadata = None
    metadata_filename = f"{name}.json"
    # The group is addressed by the metadata.
    metadata_group = fn_cache_manager.get_group(
        metadata_filename
    ) or {}
    metadata_path = metadata_group.get(metadata_filename)
    if metadata_path is not None:
        with open(metadata_path) as f:
            metadata = json.load(f)
    else:
        metadata = {"num_warps": num_warps,
                    "num_stages": num_stages,
                    "constants": _get_jsonable_constants(constants),
                    "debug": debug}
        if ext == "ptx":
            assert "shared" in kwargs, "ptx compilation must provide shared memory size"
            metadata["shared"] = kwargs["shared"]
    first_stage = list(stages.keys()).index(ext)
    asm = dict()
    module = fn
    # Run compilation pipeline and populate metadata.
    for ir, (parse, compile_kernel) in list(stages.items())[first_stage:]:
        ir_filename = f"{name}.{ir}"
        if ir == ext:
            next_module = parse(fn)
        else:
            path = metadata_group.get(ir_filename)
            if path is None:
                next_module = compile_kernel(module)
                if ir == "amdgcn":
                    # The amdgcn stage yields (asm_text, hsaco_path); both
                    # artifacts go into the cache group.
                    extra_file_name = f"{name}.hsaco_path"
                    metadata_group[ir_filename] = fn_cache_manager.put(next_module[0], ir_filename)
                    metadata_group[extra_file_name] = fn_cache_manager.put(next_module[1], extra_file_name)
                else:
                    # Fix: put the artifact exactly once (a second redundant
                    # put of the same file was removed here).
                    metadata_group[ir_filename] = fn_cache_manager.put(next_module, ir_filename)
            else:
                if ir == "amdgcn":
                    extra_file_name = f"{name}.hsaco_path"
                    hsaco_path = metadata_group.get(extra_file_name)
                    assert hsaco_path is not None, "Expected to have hsaco_path in metadata when we have the amdgcn"
                    next_module = (parse(path), parse(hsaco_path))
                else:
                    next_module = parse(path)
        if ir == "cubin":
            asm[ir] = next_module
        elif ir == "amdgcn":
            asm[ir] = str(next_module[0])
        else:
            asm[ir] = str(next_module)
        if ir == "llir" and "shared" not in metadata:
            metadata["shared"] = _triton.get_shared_memory_size(module)
        if ir == "ptx":
            metadata["name"] = get_kernel_name(next_module, pattern='// .globl')
        if ir == "amdgcn":
            metadata["name"] = get_kernel_name(next_module[0], pattern='.globl')
            asm["hsaco_path"] = next_module[1]
        module = next_module
    # Write-back metadata, if it didn't come from the cache.
    if metadata_path is None:
        metadata_group[metadata_filename] = fn_cache_manager.put(json.dumps(metadata), metadata_filename, binary=False)
    fn_cache_manager.put_group(metadata_filename, metadata_group)
    # Return handle to compiled kernel.
    return CompiledKernel(fn, so_path, metadata, asm)
class CompiledKernel:
    """Handle to a compiled Triton kernel: the launcher stub, compile
    metadata, and the generated code at every pipeline stage (``asm``).

    GPU binaries are loaded lazily: construction only imports the launcher
    extension module; the actual device binary is loaded on first launch
    (or first access of ``c_wrapper``) via ``_init_handles``.
    """
    # Hooks for external tools to monitor the execution of triton kernels
    launch_enter_hook = None
    launch_exit_hook = None
    def __init__(self, fn, so_path, metadata, asm):
        """Load the launcher shared object at *so_path* and record metadata.

        *metadata* must carry "shared", "num_warps", "num_stages" and
        "constants"; *asm* maps stage name -> generated code/binary.
        """
        # initialize launcher: import the compiled stub .so under a
        # private module name and grab its `launch` entry point
        import importlib.util
        spec = importlib.util.spec_from_file_location("__triton_launcher", so_path)
        mod = importlib.util.module_from_spec(spec)
        self.fn = fn
        spec.loader.exec_module(mod)
        self.c_wrapper = getattr(mod, "launch")
        # initialize metadata
        self.shared = metadata["shared"]
        self.num_warps = metadata["num_warps"]
        self.num_stages = metadata["num_stages"]
        self.constants = metadata["constants"]
        # initialize asm dict
        self.asm = asm
        # binaries are lazily initialized
        # because it involves doing runtime things
        # (e.g., checking amount of shared memory on current device)
        self.metadata = metadata
        self.cu_module = None
        self.cu_function = None
    def _init_handles(self):
        """Load the kernel binary onto the current device (idempotent).

        Raises OutOfResources when the kernel needs more shared memory
        than the device provides.
        """
        if self.cu_module is not None:
            return
        device = triton.runtime.jit.get_current_device()
        # pick the backend-appropriate binary out of self.asm
        bin_path = {
            driver.HIP: "hsaco_path",
            driver.CUDA: "cubin"
        }[driver.backend]
        max_shared = driver.utils.get_device_properties(device)["max_shared_mem"]
        if self.shared > max_shared:
            raise OutOfResources(self.shared, max_shared, "shared memory")
        mod, func, n_regs, n_spills = driver.utils.load_binary(self.metadata["name"], self.asm[bin_path], self.shared, device)
        self.n_spills = n_spills
        self.n_regs = n_regs
        self.cu_module = mod
        self.cu_function = func
    def __getattribute__(self, name):
        # Reading `c_wrapper` triggers lazy binary loading, so a kernel can
        # be constructed without a device but gets one bound before launch.
        if name == 'c_wrapper':
            self._init_handles()
        return super().__getattribute__(name)
    def __getitem__(self, grid):
        """Index with a 3-element grid to get a launcher callable."""
        self._init_handles()
        def runner(*args, stream=None):
            # default to the current CUDA stream when none is supplied
            if stream is None:
                stream = triton.runtime.jit.get_cuda_stream()
            self.c_wrapper(grid[0], grid[1], grid[2], self.num_warps, self.shared, stream, self.cu_function,
                           CompiledKernel.launch_enter_hook, CompiledKernel.launch_exit_hook, self, *args)
        return runner
    def get_sass(self, fun=None):
        """Disassemble the cubin to SASS via a temp file; cached in asm['sass']."""
        if 'sass' in self.asm:
            return self.asm['sass']
        fd, path = tempfile.mkstemp()
        try:
            with open(fd, 'wb') as cubin:
                cubin.write(self.asm['cubin'])
            self.sass = extract(path, fun)
        finally:
            os.remove(path)
        self.asm['sass'] = self.sass
        return self.sass
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,484
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/dataset/conllpp.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import datasets as ds
# CrossWeigh's corrected CoNLL-2003 data files ("CoNLL++"), hosted on GitHub.
_URL = "https://github.com/ZihanWangKi/CrossWeigh/raw/master/data/"
# Maps split name -> download URL for that split's CoNLL-format text file.
_URLS = {
    "train": f"{_URL}conllpp_train.txt",
    "valid": f"{_URL}conllpp_dev.txt",
    "test": f"{_URL}conllpp_test.txt",
}
class Conllpp(ds.GeneratorBasedBuilder):
    """Dataset builder for CoNLL++ (CrossWeigh-corrected CoNLL-2003 NER data).

    Each example is one sentence: parallel sequences of tokens and their
    POS, chunk, and NER tags, read from whitespace-separated CoNLL files.
    """
    BUILDER_CONFIGS = [ds.BuilderConfig(name="conllpp", version=ds.Version("1.0.0"))]
    def _info(self):
        # Declares the feature schema; the tag name lists fix the ClassLabel
        # integer encoding, so their order must not change.
        return ds.DatasetInfo(
            description="",
            citation="",
            homepage="",
            license="",
            features=ds.Features(
                {
                    "id": ds.Value("string"),
                    "tokens": ds.Sequence(ds.Value("string")),
                    "pos_tags": ds.Sequence(
                        ds.features.ClassLabel(
                            names=[
                                '"',
                                "''",
                                "#",
                                "$",
                                "(",
                                ")",
                                ",",
                                ".",
                                ":",
                                "``",
                                "CC",
                                "CD",
                                "DT",
                                "EX",
                                "FW",
                                "IN",
                                "JJ",
                                "JJR",
                                "JJS",
                                "LS",
                                "MD",
                                "NN",
                                "NNP",
                                "NNPS",
                                "NNS",
                                "NN|SYM",
                                "PDT",
                                "POS",
                                "PRP",
                                "PRP$",
                                "RB",
                                "RBR",
                                "RBS",
                                "RP",
                                "SYM",
                                "TO",
                                "UH",
                                "VB",
                                "VBD",
                                "VBG",
                                "VBN",
                                "VBP",
                                "VBZ",
                                "WDT",
                                "WP",
                                "WP$",
                                "WRB",
                            ]
                        )
                    ),
                    "chunk_tags": ds.Sequence(
                        ds.features.ClassLabel(
                            names=[
                                "O",
                                "B-ADJP",
                                "I-ADJP",
                                "B-ADVP",
                                "I-ADVP",
                                "B-CONJP",
                                "I-CONJP",
                                "B-INTJ",
                                "I-INTJ",
                                "B-LST",
                                "I-LST",
                                "B-NP",
                                "I-NP",
                                "B-PP",
                                "I-PP",
                                "B-PRT",
                                "I-PRT",
                                "B-SBAR",
                                "I-SBAR",
                                "B-UCP",
                                "I-UCP",
                                "B-VP",
                                "I-VP",
                            ]
                        )
                    ),
                    "ner_tags": ds.Sequence(
                        ds.features.ClassLabel(
                            names=[
                                "O",
                                "B-PER",
                                "I-PER",
                                "B-ORG",
                                "I-ORG",
                                "B-LOC",
                                "I-LOC",
                                "B-MISC",
                                "I-MISC",
                            ]
                        )
                    ),
                }
            ),
        )
    def _split_generators(self, mgr):
        # Download the three split files; the download dict keys mirror _URLS.
        # Fixes: keyword is `gen_kwargs` (not `gen_kw`), the validation key is
        # "valid" (not "dev"), and the kwarg name matches _generate_examples.
        fs = mgr.download_and_extract(_URLS)
        return [
            ds.SplitGenerator(name=ds.Split.TRAIN, gen_kwargs={"path": fs["train"]}),
            ds.SplitGenerator(name=ds.Split.VALIDATION, gen_kwargs={"path": fs["valid"]}),
            ds.SplitGenerator(name=ds.Split.TEST, gen_kwargs={"path": fs["test"]}),
        ]
    def _generate_examples(self, path):
        """Yield ``(key, example)`` pairs from the CoNLL-format file at `path`.

        Sentences are separated by blank lines or ``-DOCSTART-`` markers;
        each data line is ``token POS chunk NER`` separated by spaces.
        """
        with open(path, encoding="utf-8") as f:
            i = 0
            ts = []
            pos = []
            chunks = []
            ners = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    # Sentence boundary: emit the accumulated sentence, if any.
                    if ts:
                        yield i, {
                            # Fix: was `"id"(i),` (a syntax error); the schema
                            # declares "id" as a string feature.
                            "id": str(i),
                            "tokens": ts,
                            "pos_tags": pos,
                            "chunk_tags": chunks,
                            "ner_tags": ners,
                        }
                        i += 1
                        ts = []
                        pos = []
                        chunks = []
                        ners = []
                else:
                    splits = line.split(" ")
                    ts.append(splits[0])
                    pos.append(splits[1])
                    chunks.append(splits[2])
                    ners.append(splits[3].rstrip())
            # The last sentence may not be followed by a blank line.
            if ts:
                yield i, {
                    "id": str(i),
                    "tokens": ts,
                    "pos_tags": pos,
                    "chunk_tags": chunks,
                    "ner_tags": ners,
                }
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,485
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/triton/debugger/memory_map.py
|
import dataclasses
from triton.debugger import torch_wrapper
torch = torch_wrapper.torch
@dataclasses.dataclass
class RegisteredStorage:
    """Bookkeeping for one tensor storage registered with the debugger's MemoryMap.

    Records the raw address range [ptr, ptr + size) so integer pointer values
    seen by the interpreted kernel can be translated back into indices into
    the owning storage.
    """

    storage: torch.Storage  # the underlying storage backing a registered tensor
    dtype: torch.dtype  # element dtype used when viewing the storage as a tensor
    size: int  # storage size captured at registration time
    ptr: int  # base data pointer captured at registration time

    @property
    def end_ptr(self) -> int:
        # Exclusive upper bound of this storage's address range.
        return self.ptr + self.size

    @property
    def access_tensor(self) -> torch.Tensor:
        # NOTE(review): this assumes torch.tensor(storage, ...) aliases the
        # storage so that writes through the returned tensor reach the original
        # memory (MemoryMap.store relies on that) -- confirm, since
        # torch.tensor usually copies its input.
        return torch.tensor(self.storage, dtype=self.dtype, device=self.storage.device)

    def ensure_immutable(self):
        # Guard against the storage having been resized or reallocated since
        # registration; pointer translation would be invalid in that case.
        assert self.storage.data_ptr() == self.ptr and self.storage.size() == self.size
class MemoryMap:
    """Maps integer device pointers back to the tensors that own them.

    Used by the debugger to emulate `tl.load`/`tl.store` on real torch
    tensors: each registered tensor contributes a [ptr, end_ptr) address
    range, and loads/stores translate pointer tensors into indices relative
    to the owning storage.
    """

    # Fixed: the original annotation was `[RegisteredStorage]`, a list
    # *literal* containing the class, not a type.
    storages: list[RegisteredStorage]

    def __init__(self):
        self.storages = []

    def _get_registered_storage(self, pointer: torch.Tensor):
        """Return the single RegisteredStorage covering every address in `pointer`.

        Raises if the addresses are not all inside one registered range.
        """
        max_pointer = torch.max(pointer).item()
        min_pointer = torch.min(pointer).item()
        registered_storage = next(
            filter(
                lambda registered: min_pointer >= registered.ptr and max_pointer < registered.end_ptr, self.storages
            ),
            None,
        )
        if registered_storage is None:
            raise Exception("Storage not found or pointers spanning multiple tensors")
        registered_storage.ensure_immutable()
        return registered_storage

    def add_tensor(self, t: torch.Tensor):
        """Register `t`'s storage and return its base data pointer."""
        storage = t.untyped_storage()
        self.storages.append(RegisteredStorage(storage, t.dtype, storage.size(), storage.data_ptr()))
        return t.data_ptr()

    def load(
        self,
        pointer: torch.Tensor,
        mask: torch.Tensor = None,
        other=0.0,
    ):
        """Gather values addressed by `pointer`; unmasked slots get `other`.

        `pointer` must be an int64 CUDA tensor of rank 1 or 2; `mask`
        defaults to all-true and is broadcast to `pointer`'s shape.
        """
        assert pointer.is_cuda
        assert 0 < pointer.dim() < 3
        assert pointer.dtype == torch.int64
        if mask is None:
            mask = torch.ones_like(pointer).bool()
        assert mask.is_cuda
        assert 0 < mask.dim() < 3
        assert mask.dtype == torch.bool
        mask = mask.expand(pointer.size())
        if torch.all(~mask):
            # Todo: The type is wrong here, we can't determine the correct type
            return torch.full_like(pointer, fill_value=other, dtype=torch.float16, device="cuda")
        registered_storage = self._get_registered_storage(pointer[mask])
        access_tensor = registered_storage.access_tensor
        # NOTE(review): offsets are raw pointer differences; this presumes the
        # index units of access_tensor match pointer arithmetic units (true
        # for 1-byte elements) -- confirm for wider dtypes.
        index_tensor = pointer - registered_storage.ptr
        block = torch.full_like(pointer, fill_value=other, dtype=access_tensor.dtype, device="cuda")
        block[mask] = access_tensor[index_tensor[mask]]
        return block

    def store(self, pointer: torch.Tensor, value: torch.Tensor, mask=None):
        """Scatter `value` into the storage addressed by `pointer` where `mask` is true."""
        assert 0 < pointer.dim() < 3
        assert pointer.dtype == torch.int64
        if mask is None:
            mask = torch.ones_like(pointer).bool()
        assert 0 < mask.dim() < 3
        assert mask.dtype == torch.bool
        mask = mask.expand(pointer.size())
        if torch.all(~mask):
            # Nothing enabled: no-op.
            return
        registered_storage = self._get_registered_storage(pointer[mask])
        access_tensor = registered_storage.access_tensor
        index_tensor = pointer - registered_storage.ptr
        access_tensor[index_tensor[mask]] = value[mask].to(access_tensor.dtype)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,486
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/base/doc/graph.py
|
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import networkx as nx
from .base import Record
# (in-degree, out-degree) signatures, presumably passed as `kind` to
# DiGraph.linked_recs (which unpacks them as `i, o = kind`):
feeder = 0, 1  # no predecessors, exactly one successor
bridger = 1, 1  # exactly one predecessor and one successor
sinker = 1, 0  # exactly one predecessor, no successors
class DiGraph(nx.DiGraph):
    """Directed graph of message records with helpers to prune and splice nodes."""

    def empty_recs(self):
        """Yield nodes whose 'empty' attribute is truthy (see Graphs.msg_attrs)."""
        # Snapshot into a dict so callers may mutate the graph while consuming.
        for m, d in dict(self.nodes(data=True)).items():
            if d.get('empty', False):
                yield m

    def linked_recs(self, kind):
        """Yield (predecessor, node, successor) triples for every node whose
        (in-degree, out-degree) equals `kind` — e.g. feeder/bridger/sinker.

        predecessor/successor is None when the respective degree is 0.
        """
        i, o = kind
        ms = (m for m, d in self.in_degree() if d == i)
        # Materialize before yielding: consumers may remove nodes between yields.
        for m in [m for m, d in self.out_degree(ms) if d == o]:
            if m in self:
                if self.in_degree(m) == i and self.out_degree(m) == o:
                    # Fix: predecessors()/successors() return iterators in
                    # networkx >= 2 and are not subscriptable; use next() to
                    # take the single neighbor (degree is exactly 1 here).
                    p = next(iter(self.predecessors(m))) if i else None
                    s = next(iter(self.successors(m))) if o else None
                    yield p, m, s

    def purge_recs(self):
        """Remove every isolated node (degree 0)."""
        if self.size():
            # Fix: iterate a snapshot — removing nodes while iterating the
            # live nodes() view raises RuntimeError in networkx >= 2.
            for m in list(self.nodes()):
                if not self.degree(m):
                    self.remove_node(m)

    def remove_msg(self, msg):
        """Remove `msg`, splicing every predecessor to every successor so
        paths through `msg` are preserved."""
        if msg in self:
            # Snapshot the neighbor views before mutating edges.
            for p in list(self.predecessors(msg)):
                for s in list(self.successors(msg)):
                    self.add_edge(p, s)
            self.remove_node(msg)
class Graphs:
    """Container of named DiGraph instances, created lazily through properties
    that init_class generates from the subclass-provided `_graphs` names."""

    @classmethod
    def init_class(cls):
        # For each graph name in cls._graphs (declared by subclasses), install
        # a backing slot '_<name>' plus a lazy property '<name>' that creates
        # the DiGraph on first access.
        for g in cls._graphs:
            setattr(cls, '_' + g, None)

            def make_getter(name):
                # Bind `name` per iteration to avoid the late-binding-closure trap.
                n = '_' + name

                def get(self):
                    if getattr(self, n) is None:
                        setattr(self, n, DiGraph())
                    return getattr(self, n)

                return get

            setattr(cls, g, property(make_getter(g)))

    def __init__(self, seed=(), **kw):
        super().__init__(**kw)
        for i in seed:
            self.add_item(i)

    @property
    def graphs(self):
        """Generator over all backing graphs (in `_graphs` order)."""
        return (getattr(self, n) for n in self._graphs)

    def msg_attrs(self, txt, kind, **kw):
        """Return node attributes: `empty` flags a blank text, `kind` is kept as-is."""
        kw.update(empty=not bool(txt), kind=kind)
        return kw

    def add_item(self, item, cntr=None, **kw):
        """Add one (first, second, kind) item: Record kinds become nodes of the
        `record` graph, everything else an edge of the graph named by kind.label.

        Fix: `cntr` was a required positional argument, but __init__ seeds
        items with `self.add_item(i)` — any non-empty `seed` raised TypeError.
        It now defaults to None and counting is skipped without one.
        """
        f, s, k = item
        if issubclass(k, Record):
            self.record.add_node(f, **self.msg_attrs(s, k, **kw))
            if cntr is not None:
                cntr.incr('record')
        else:
            getattr(self, k.label).add_edge(f, s)
            if cntr is not None:
                cntr.incr(k.label)

    def check(self):
        # Hook for subclasses; intentionally a no-op here.
        pass

    def grow_from(self, src, adjs=None, **kw):
        """Add every item from `src` (kw may carry `cntr`), then apply `adjs`."""
        for i in src:
            self.add_item(i, **kw)
        if adjs:
            self.adjust_from(adjs)
        self.check()

    def purge_empty(self, cntr, **_):
        """Splice out every empty record node from all graphs, counting under 'd'."""
        for m in self.record.empty_recs():
            for g in self.graphs:
                g.remove_msg(m)
            cntr.incr('d')
        self.check()
"""
@property
def comps(self):
return nx.weakly_connected_components(self.nxdg)
def roots(self, comp):
return sorted(n for n, d in self.nxdg.in_degree(comp) if not d)
def nodes(self, root):
return nx.dfs_preorder_nodes(self.nxdg, root)
def merge(self, other):
return self
def init_from(self, src):
pass
def adjust_from(self, src):
pass
import contextlib as cl
@cl.contextmanager
def graph(path, directed=True, **kw):
g = nx.DiGraph(**kw) if directed else nx.Graph(**kw)
yield g
a = nx.nx_agraph.to_agraph(g)
#p = str(path)
# g.write_dot(p)
#g = gv.AGraph()
# g.read(p)
a.draw(str(path.with_suffix('.png')), prog="neato")
import pygraphviz as gv
A = gv.AGraph()
A.node_attr['style'] = 'filled'
A.node_attr['shape'] = 'circle'
A.node_attr['fixedsize'] = 'true'
A.node_attr['fontcolor'] = '#FFFFFF'
for i in range(16):
A.add_edge(0, i)
n = A.get_node(i)
n.attr['fillcolor'] = "#%2x0000" % (i * 16)
n.attr['height'] = "%s" % (i / 16.0 + 0.5)
n.attr['width'] = "%s" % (i / 16.0 + 0.5)
print(A.string())
A.write("/tmp/star.dot")
print("Wrote star.dot")
A.draw('/tmp/star.png', prog="circo")
print("Wrote star.png")
"""
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,487
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/base/activism.py
|
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from .claim import Claim
from .narrative import Node
from .author import Authority
from .judgment import Judgment
class Activism(Node):
    """A narrative node that aggregates Claims and Judgments and exposes a
    combined 'turmoil' weight; subclasses only override `sign`."""

    # Class-level None sentinels; replaced by per-instance lists on first init.
    claims = judgments = None

    def __init__(self, text=None, judgments=None, authority=None, **kw):
        super().__init__(**kw)
        if self.claims is None:
            # Assignment shadows the class-level None with instance lists.
            self.claims, self.judgments = [], []
        if text:
            # These keys belong to the Activism node, not the Claim.
            for k in ('factor', 'bias', 'weight'):
                kw.pop(k, None)
            self.claims.append(Claim(text=text, **kw))
        if judgments:
            # '|'-separated entries; only those containing ':' are valid names.
            js = (j.strip() for j in judgments.split('|') if ':' in j)
            self.judgments.extend(Judgment.create(name=j) for j in js if j)
        if authority:
            self.authority = Authority.create(name=authority)

    @property
    def weight(self):
        # `partial` and `bias` come from Node — presumably partial combines
        # the claim and judgment weight tuples; TODO confirm against Node.
        cs = tuple(c.weight for c in self.claims)
        js = tuple(j.weight for j in self.judgments)
        return self.partial(cs, js) + self.bias

    @property
    def turmoil(self):
        return self.weight

    @property
    def value(self):
        t = self.turmoil
        return '{} {}: T{}'.format(super().value, self.authority.agency, t)

    @property
    def fields(self):
        """Return one fields-dict per claim and per judgment, each tagged with
        this activism's name and its partial turmoil contribution."""
        fs = super().fields
        fs['Activism'] = self.name
        ls = []
        for c in self.claims:
            fs2 = c.fields
            fs2.update(fs)
            fs2['Turmoil'] = self.partial(c.weight)
            ls.append(fs2)
        for j in sorted(self.judgments, key=lambda j: j.sequence):
            # Fix: this previously read `c.fields`, reusing the loop variable
            # leaked from the claims loop above — a NameError when `claims`
            # is empty, and the last claim's fields duplicated otherwise.
            # Parallel structure says the judgment's own fields were meant;
            # NOTE(review): confirm Judgment exposes `fields` like Claim does.
            fs2 = j.fields
            fs2['Topic'] = fs['Topic']
            fs2['Narrative'] = fs['Narrative']
            fs2['Activism'] = fs['Activism']
            fs2['Turmoil'] = self.partial(j.weight)
            ls.append(fs2)
        return ls
# Concrete activism kinds. Each subclass only sets `sign`, the '@'-prefixed
# shorthand marker for this kind — presumably matched when parsing source
# text; TODO confirm where `sign` is consumed.
class Exclude(Activism):
    sign = '@x'
class Insinuate(Activism):
    sign = '@i'
class Polarize(Activism):
    sign = '@o'
class Recast(Activism):
    sign = '@r'
class Elevate(Activism):
    sign = '@e'
class Victimize(Activism):
    sign = '@v'
class Exploit(Activism):
    sign = '@t'
class Perpetuate(Activism):
    sign = '@p'
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,488
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/test/unit/operators/test_flash_attention.py
|
import pytest
import torch
import triton
import triton.ops
@pytest.mark.parametrize('Z, H, N_CTX, D_HEAD', [(4, 48, 1024, 64)])
@pytest.mark.parametrize('dtype', [torch.float16, torch.bfloat16])
def test_op(Z, H, N_CTX, D_HEAD, dtype):
    """Compare triton.ops.attention (forward + backward) against a pure
    PyTorch causal-attention reference on random half-precision inputs."""
    capability = torch.cuda.get_device_capability()
    if capability[0] < 8:
        # BUG FIX: the original message said "< 80" even though the skip
        # fires exactly when the device is BELOW compute capability 8.0.
        pytest.skip("Flash attention only supported for compute capability >= 80")
    torch.manual_seed(20)
    q = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0.1, std=0.2).requires_grad_()
    k = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0.4, std=0.2).requires_grad_()
    v = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0.3, std=0.2).requires_grad_()
    sm_scale = 0.2
    dout = torch.randn_like(q)
    # Reference implementation: scaled dot-product with a causal mask.
    M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
    p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
    # The (N_CTX, N_CTX) boolean mask indexes the last two dims and broadcasts
    # over batch and head, so one assignment suffices; the original re-applied
    # the identical assignment Z*H times inside a redundant double loop.
    p[:, :, M == 0] = float("-inf")
    p = torch.softmax(p.float(), dim=-1).to(dtype)
    ref_out = torch.matmul(p, v)
    ref_out.backward(dout)
    ref_dv, v.grad = v.grad.clone(), None
    ref_dk, k.grad = k.grad.clone(), None
    ref_dq, q.grad = q.grad.clone(), None
    # Triton implementation under test.
    tri_out = triton.ops.attention(q, k, v, sm_scale)
    tri_out.backward(dout)
    tri_dv, v.grad = v.grad.clone(), None
    tri_dk, k.grad = k.grad.clone(), None
    tri_dq, q.grad = q.grad.clone(), None
    # Compare; bfloat16 has fewer mantissa bits, so it gets a looser atol.
    atol = 1e-1 if dtype == torch.bfloat16 else 1e-2
    # torch.testing.assert_allclose is deprecated; assert_close is the
    # supported replacement with explicit tolerances.
    torch.testing.assert_close(ref_out, tri_out, atol=atol, rtol=0)
    torch.testing.assert_close(ref_dv, tri_dv, atol=atol, rtol=0)
    torch.testing.assert_close(ref_dk, tri_dk, atol=atol, rtol=0)
    torch.testing.assert_close(ref_dq, tri_dq, atol=atol, rtol=0)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,489
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/models/gptj.py
|
import warnings
from typing import Optional, Tuple, Union
import torch
import torch.fx
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
QuestionAnsweringModelOutput,
SequenceClassifierOutputWithPast,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
is_torch_fx_proxy,
logging,
)
from ...utils.model_parallel_utils import assert_device_map, get_device_map
from .configuration_gptj import GPTJConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "hf-internal-testing/tiny-random-gptj"
_REAL_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-j-6B"
_CONFIG_FOR_DOC = "GPTJConfig"
GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = [
"EleutherAI/gpt-j-6B",
# See all GPT-J models at https://huggingface.co/models?filter=gptj
]
def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
    """Build the fixed sin/cos phase table used for rotary embeddings.

    Returns a float tensor of shape ``(num_pos, 2 * ceil(dim / 2))``: the first
    half of dim 1 holds ``sin(pos * inv_freq)`` and the second half the
    matching ``cos`` values.
    """
    # One inverse frequency per pair of rotary channels (classic 10000^(-2i/d)).
    inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2) / dim))
    positions = torch.arange(num_pos, dtype=torch.float)
    # Outer product position x frequency -> phase angle per (pos, channel pair).
    angles = (positions[:, None] * inv_freq[None, :]).float()
    return torch.cat((torch.sin(angles), torch.cos(angles)), dim=1)
@torch.fx.wrap
def get_embed_positions(embed_positions, position_ids):
    """Return the sincos table on ``position_ids``' device, tiled per batch row.

    Wrapped for torch.fx so the conditional device transfer is treated as a
    single leaf call during symbolic tracing.
    """
    batch_size = position_ids.shape[0]
    on_device = embed_positions.to(position_ids.device)
    return on_device.repeat(batch_size, 1, 1)
def rotate_every_two(x) -> torch.Tensor:
    """Map ``[x0, x1, x2, x3, ...]`` to ``[-x1, x0, -x3, x2, ...]`` on the last dim.

    Rotates each adjacent channel pair by 90 degrees; expects a rank-4 input.
    In einsum notation: rearrange(x, '... d j -> ... (d j)').
    """
    evens = x[:, :, :, 0::2]
    odds = x[:, :, :, 1::2]
    paired = torch.stack((-odds, evens), dim=-1)
    return paired.flatten(-2)


def apply_rotary_pos_emb(tensor, sin, cos) -> torch.Tensor:
    """Apply rotary position embedding (RoPE) phases to ``tensor``.

    ``sin``/``cos`` carry one phase per channel *pair*; each phase is duplicated
    and a broadcast head axis is inserted so they line up with the interleaved
    layout produced by ``rotate_every_two``.
    """
    sin_full = torch.repeat_interleave(sin[:, :, None, :], 2, 3)
    cos_full = torch.repeat_interleave(cos[:, :, None, :], 2, 3)
    return (tensor * cos_full) + (rotate_every_two(tensor) * sin_full)
class GPTJAttention(nn.Module):
    """Causal multi-head self-attention with rotary position embeddings (GPT-J)."""

    def __init__(self, config):
        super().__init__()
        max_positions = config.max_position_embeddings
        # Static lower-triangular boolean mask; sliced per sequence length in
        # _attn to forbid attending to future positions.
        self.register_buffer(
            "bias",
            torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
                1, 1, max_positions, max_positions
            ),
        )
        # NOTE(review): masked_bias is never read in this class — _attn masks
        # with the dtype minimum instead. Presumably kept for state-dict
        # compatibility; confirm before removing.
        self.register_buffer("masked_bias", torch.tensor(-1e9))
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.embed_dim = config.hidden_size
        self.num_attention_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_attention_heads
        if self.head_dim * self.num_attention_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
                f" `num_attention_heads`: {self.num_attention_heads})."
            )
        # sqrt(head_dim) computed in fp32 then cast; divides the logits in _attn.
        self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(
            torch.get_default_dtype()
        )
        # All four projections are bias-free.
        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
        self.rotary_dim = config.rotary_dim
        # Rotary table covers rotary_dim channels when set, else the full embed dim.
        pos_embd_dim = self.rotary_dim or self.embed_dim
        self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim)

    def _split_heads(self, tensor, num_attention_heads, attn_head_size, rotary):
        """
        Splits hidden dim into attn_head_size and num_attention_heads
        """
        new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)
        tensor = tensor.view(new_shape)
        # When rotary=True the (..., seq, head, head_dim) layout is kept so the
        # rotary embedding can index the sequence axis directly.
        if rotary:
            return tensor
        if len(tensor.shape) == 5:
            return tensor.permute(
                0, 1, 3, 2, 4
            )  # (batch, blocks, head, block_length, head_features)
        elif len(tensor.shape) == 4:
            return tensor.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)
        else:
            raise ValueError(
                f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}"
            )

    def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
        """
        Merges attn_head_size dim and num_attn_heads dim into hidden dim
        """
        # Inverse of _split_heads: move the head axis back next to head_dim,
        # then collapse the two into the hidden dimension.
        if len(tensor.shape) == 5:
            tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
        elif len(tensor.shape) == 4:
            tensor = tensor.permute(0, 2, 1, 3).contiguous()
        else:
            raise ValueError(
                f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}"
            )
        new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
        return tensor.view(new_shape)

    def _attn(
        self,
        query,
        key,
        value,
        attention_mask=None,
        head_mask=None,
    ):
        """Scaled dot-product attention with causal masking.

        Returns ``(attn_output, attn_weights)``.
        """
        # compute causal mask from causal mask buffer
        query_length, key_length = query.size(-2), key.size(-2)
        causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
        # Keep the attention weights computation in fp32 to avoid overflow issues
        query = query.to(torch.float32)
        key = key.to(torch.float32)
        attn_weights = torch.matmul(query, key.transpose(-1, -2))
        mask_value = torch.finfo(attn_weights.dtype).min
        # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
        # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
        mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
        attn_weights = torch.where(causal_mask, attn_weights, mask_value)
        # NOTE: GPT-J scales *after* masking, unlike some GPT variants.
        attn_weights = attn_weights / self.scale_attn
        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        # Cast back from fp32 to the value dtype before the weighted sum.
        attn_weights = attn_weights.to(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)
        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask
        attn_output = torch.matmul(attn_weights, value)
        return attn_output, attn_weights

    def _get_embed_positions(self, position_ids):
        """Eager-mode twin of get_embed_positions: caches the device transfer."""
        embed_positions = self.embed_positions
        if embed_positions.device != position_ids.device:
            # Move once and remember, so later calls skip the copy.
            embed_positions = embed_positions.to(position_ids.device)
            self.embed_positions = embed_positions
        return embed_positions.repeat(position_ids.shape[0], 1, 1)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        layer_past=None,
        attention_mask=None,
        position_ids=None,
        head_mask=None,
        use_cache=False,
        output_attentions=False,
    ) -> Union[
        Tuple[torch.Tensor, Tuple[torch.Tensor]],
        Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
    ]:
        """Run rotary-embedded causal attention over ``hidden_states``.

        Returns ``(attn_output, present)`` plus ``attn_weights`` when
        ``output_attentions`` is set; ``present`` is ``(key, value)`` when
        ``use_cache`` else ``None``.
        """
        query = self.q_proj(hidden_states)
        key = self.k_proj(hidden_states)
        value = self.v_proj(hidden_states)
        # Q/K stay in (batch, seq, head, head_dim) order (rotary=True) so the
        # rotary phases can be gathered along the sequence axis.
        query = self._split_heads(query, self.num_attention_heads, self.head_dim, True)
        key = self._split_heads(key, self.num_attention_heads, self.head_dim, True)
        value = self._split_heads(value, self.num_attention_heads, self.head_dim, False)
        if is_torch_fx_proxy(position_ids):
            # The logic to conditionally copy to GPU could not be traced, so we do this
            # every time in the torch.fx case
            embed_positions = get_embed_positions(self.embed_positions, position_ids)
        else:
            embed_positions = self._get_embed_positions(position_ids)
        # Pick each position's sin/cos row out of the precomputed table.
        repeated_position_ids = position_ids.unsqueeze(-1).repeat(1, 1, embed_positions.shape[-1])
        sincos = torch.gather(embed_positions, 1, repeated_position_ids)
        sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)
        if self.rotary_dim is not None:
            # Only the first rotary_dim channels are rotated; the rest pass through.
            k_rot = key[:, :, :, : self.rotary_dim]
            k_pass = key[:, :, :, self.rotary_dim :]
            q_rot = query[:, :, :, : self.rotary_dim]
            q_pass = query[:, :, :, self.rotary_dim :]
            k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
            q_rot = apply_rotary_pos_emb(q_rot, sin, cos)
            key = torch.cat([k_rot, k_pass], dim=-1)
            query = torch.cat([q_rot, q_pass], dim=-1)
        else:
            key = apply_rotary_pos_emb(key, sin, cos)
            query = apply_rotary_pos_emb(query, sin, cos)
        # Now move heads in front of the sequence axis for the matmuls.
        key = key.permute(0, 2, 1, 3)
        query = query.permute(0, 2, 1, 3)
        if layer_past is not None:
            # Prepend cached keys/values from earlier decoding steps.
            past_key = layer_past[0]
            past_value = layer_past[1]
            key = torch.cat((past_key, key), dim=-2)
            value = torch.cat((past_value, value), dim=-2)
        if use_cache is True:
            present = (key, value)
        else:
            present = None
        # compute self-attention: V x Softmax(QK^T)
        attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
        attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
        attn_output = self.out_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)
        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs  # a, present, (attentions)
class GPTJMLP(nn.Module):
    """Position-wise feed-forward block: Linear up-projection, activation,
    Linear down-projection, then residual dropout."""

    def __init__(self, intermediate_size, config):  # in MLP: intermediate_size= 4 * embed_dim
        super().__init__()
        width = config.n_embd
        self.fc_in = nn.Linear(width, intermediate_size)
        self.fc_out = nn.Linear(intermediate_size, width)
        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
        """Project up, apply the nonlinearity, project back, apply dropout."""
        return self.dropout(self.fc_out(self.act(self.fc_in(hidden_states))))
class GPTJBlock(nn.Module):
    """One GPT-J transformer layer.

    Uses a single pre-LayerNorm whose output feeds attention and the MLP in
    parallel; both results are added back onto the residual stream.
    """

    def __init__(self, config):
        super().__init__()
        mlp_width = config.n_inner if config.n_inner is not None else 4 * config.n_embd
        self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.attn = GPTJAttention(config)
        self.mlp = GPTJMLP(mlp_width, config)

    def forward(
        self,
        hidden_states: Optional[torch.FloatTensor],
        layer_past=None,
        attention_mask=None,
        position_ids=None,
        head_mask=None,
        use_cache=False,
        output_attentions=False,
    ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
        """Return ``(hidden_states, present?, attentions?)`` — ``present`` is
        included only when ``use_cache`` is set."""
        shortcut = hidden_states
        normed = self.ln_1(hidden_states)
        attn_results = self.attn(
            hidden_states=normed,
            layer_past=layer_past,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        # attn_results is (attn_output, present[, attn_weights]).
        extras = attn_results[1:]
        # Parallel residual: the MLP also reads the normed input, not the
        # attention output.
        hidden_states = attn_results[0] + self.mlp(normed) + shortcut
        if not use_cache:
            # Drop the (None) present entry when no cache was requested.
            extras = extras[1:]
        return (hidden_states,) + extras  # hidden_states, present, (attentions)
class GPTJPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = GPTJConfig
    base_model_prefix = "transformer"
    # Enables the legacy parallelize()/deparallelize() device-map API.
    is_parallelizable = True
    supports_gradient_checkpointing = True
    # Modules that must not be split across devices when sharding the model.
    _no_split_modules = ["GPTJBlock"]

    def __init__(self, *inputs, **kw):
        super().__init__(*inputs, **kw)

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear,)):
            # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            # LayerNorm starts out as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        # Only the GPTJModel backbone owns the gradient_checkpointing flag.
        if isinstance(module, GPTJModel):
            module.gradient_checkpointing = value
class GPTJModel(GPTJPreTrainedModel):
    """Bare GPT-J transformer: token embedding, ``n_layer`` GPTJBlocks, and a
    final LayerNorm — no task head."""

    def __init__(self, config):
        super().__init__(config)
        self.embed_dim = config.n_embd
        self.vocab_size = config.vocab_size
        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([GPTJBlock(config) for _ in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
        # Model parallel
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def parallelize(self, device_map=None):
        """Deprecated: spread the blocks across CUDA devices per ``device_map``
        (defaults to an even split over all visible GPUs)."""
        warnings.warn(
            "`GPTJModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your"
            " model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
            " `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1,"
            " ...}",
            FutureWarning,
        )
        # Check validity of device_map
        self.device_map = (
            get_device_map(len(self.h), range(torch.cuda.device_count()))
            if device_map is None
            else device_map
        )
        assert_device_map(self.device_map, len(self.h))
        self.model_parallel = True
        # Embeddings live on the first device, ln_f on the last.
        self.first_device = (
            "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
        )
        self.last_device = "cuda:" + str(max(self.device_map.keys()))
        self.wte = self.wte.to(self.first_device)
        # Load onto devices
        for k, v in self.device_map.items():
            for block in v:
                cuda_device = "cuda:" + str(k)
                self.h[block] = self.h[block].to(cuda_device)
        # ln_f to last
        self.ln_f = self.ln_f.to(self.last_device)

    def deparallelize(self):
        """Deprecated: undo ``parallelize`` and move every submodule back to CPU."""
        warnings.warn(
            "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
            FutureWarning,
        )
        self.model_parallel = False
        self.device_map = None
        self.first_device = "cpu"
        self.last_device = "cpu"
        self.wte = self.wte.to("cpu")
        for index in range(len(self.h)):
            self.h[index] = self.h[index].to("cpu")
        self.ln_f = self.ln_f.to("cpu")
        torch.cuda.empty_cache()

    def get_input_embeddings(self):
        """Return the token embedding matrix."""
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        """Replace the token embedding matrix."""
        self.wte = new_embeddings

    def forward(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        """Run the full transformer stack.

        Exactly one of ``input_ids`` / ``inputs_embeds`` must be given.
        Returns a ``BaseModelOutputWithPast`` (or plain tuple when
        ``return_dict`` is falsy) with the final hidden states, the per-layer
        key/value cache, and optional per-layer hidden states / attentions.
        """
        # Fall back to config defaults for every unset flag.
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1]).long()
        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            # Cached key length tells us how many positions precede this chunk.
            past_length = past_key_values[0][0].size(-2)
        if position_ids is None:
            # Default positions continue where the cache left off.
            position_ids = torch.arange(
                past_length, input_shape[-1] + past_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
        # Attention mask.
        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError("batch_size has to be defined and > 0")
            attention_mask = attention_mask.view(batch_size, -1)
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask[:, None, None, :]
            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and the dtype's smallest value for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x num_attention_heads x N x N
        # head_mask has shape n_layer x batch x num_attention_heads x N x N
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)
        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)
        hidden_states = inputs_embeds
        if token_type_ids is not None:
            # Token-type embeddings share the word embedding table.
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds
        hidden_states = self.drop(hidden_states)
        output_shape = input_shape + (hidden_states.size(-1),)
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False
        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            # Model parallel
            if self.model_parallel:
                torch.cuda.set_device(hidden_states.device)
                # Ensure layer_past is on same device as hidden_states (might not be correct)
                if layer_past is not None:
                    layer_past = tuple(
                        past_state.to(hidden_states.device) for past_state in layer_past
                    )
                # Ensure that attention_mask is always on the same device as hidden_states
                if attention_mask is not None:
                    attention_mask = attention_mask.to(hidden_states.device)
                if isinstance(head_mask, torch.Tensor):
                    head_mask = head_mask.to(hidden_states.device)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                # Recompute the block in backward instead of storing activations.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, use_cache, output_attentions)
                    return custom_forward
                outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    None,
                    attention_mask,
                    position_ids,
                    head_mask[i],
                )
            else:
                outputs = block(
                    hidden_states=hidden_states,
                    layer_past=layer_past,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    head_mask=head_mask[i],
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )
            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)
            if output_attentions:
                # The attention index shifts by one when a present entry exists.
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
            # Model Parallel: If it's the last layer for that device, put things on the next device
            if self.model_parallel:
                for k, v in self.device_map.items():
                    if i == v[-1] and "cuda:" + str(k) != self.last_device:
                        hidden_states = hidden_states.to("cuda:" + str(k + 1))
        hidden_states = self.ln_f(hidden_states)
        hidden_states = hidden_states.view(output_shape)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, presents, all_hidden_states, all_self_attentions]
                if v is not None
            )
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
class GPTJForCausalLM(GPTJPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"h\.\d+\.attn\.bias"]
    def __init__(self, config):
        """Build the GPT-J backbone plus a vocabulary-projection LM head."""
        super().__init__(config)
        self.transformer = GPTJModel(config)
        # Unlike the projections inside the blocks, the LM head keeps its bias.
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
        # Model parallel
        self.model_parallel = False
        self.device_map = None
        # Initialize weights and apply final processing
        self.post_init()
    def parallelize(self, device_map=None):
        """Deprecated: split transformer blocks across GPUs; the LM head stays
        on the transformer's first device, next to the embeddings."""
        warnings.warn(
            "`GPTJForCausalLM.parallelize` is deprecated and will be removed in v5 of Transformers, you should load"
            " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
            " `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0':"
            " 0, 'transformer.h.1': 1, ...}",
            FutureWarning,
        )
        # Default: spread the layers evenly over all visible CUDA devices.
        self.device_map = (
            get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
            if device_map is None
            else device_map
        )
        assert_device_map(self.device_map, len(self.transformer.h))
        self.transformer.parallelize(self.device_map)
        self.lm_head = self.lm_head.to(self.transformer.first_device)
        self.model_parallel = True
    def deparallelize(self):
        """Deprecated: undo ``parallelize``, moving backbone and head to CPU."""
        warnings.warn(
            "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
            FutureWarning,
        )
        self.transformer.deparallelize()
        self.transformer = self.transformer.to("cpu")
        self.lm_head = self.lm_head.to("cpu")
        self.model_parallel = False
        # Free the GPU memory the moved modules previously occupied.
        torch.cuda.empty_cache()
    def get_output_embeddings(self):
        """Return the vocabulary-projection layer."""
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        """Replace the vocabulary-projection layer."""
        self.lm_head = new_embeddings
def prepare_inputs_for_generation(
self, input_ids, past_key_values=None, inputs_embeds=None, **kw
):
token_type_ids = kw.get("token_type_ids", None)
# only last token for inputs_ids if past is defined in kw
if past_key_values:
input_ids = input_ids[:, -1].unsqueeze(-1)
if token_type_ids is not None:
token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
attention_mask = kw.get("attention_mask", None)
position_ids = kw.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past_key_values:
position_ids = position_ids[:, -1].unsqueeze(-1)
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and past_key_values is None:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
model_inputs = {"input_ids": input_ids}
model_inputs.update(
{
"past_key_values": past_key_values,
"use_cache": kw.get("use_cache"),
"position_ids": position_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
)
return model_inputs
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.transformer.first_device)
hidden_states = hidden_states.to(self.lm_head.weight.device)
# make sure sampling in fp16 works correctly and
# compute loss in fp32 to match with mesh-tf version
# https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
lm_logits = self.lm_head(hidden_states).to(torch.float32)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
loss = loss.to(hidden_states.dtype)
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=lm_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@staticmethod
def _reorder_cache(
past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx
) -> Tuple[Tuple[torch.Tensor]]:
"""
This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
[`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
beam_idx at every generation step.
"""
return tuple(
tuple(
past_state.index_select(0, beam_idx.to(past_state.device))
for past_state in layer_past
)
for layer_past in past_key_values
)
class GPTJForSequenceClassification(GPTJPreTrainedModel):
    """GPT-J with a sequence-classification head (``score``) on top.

    The logit at the position of the *last non-padding token* of each
    sequence (located via ``config.pad_token_id``) is used as the pooled
    representation for classification/regression.
    """
    # The LM head is not part of this task head; its weights (and the
    # recomputed attention bias buffers) may be missing from a checkpoint.
    _keys_to_ignore_on_load_missing = [
        r"h\.\d+\.attn\.masked_bias",
        r"h\.\d+\.attn\.bias",
        r"lm_head.weight",
    ]
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = GPTJModel(config)
        # Bias-free projection from hidden states to per-label scores.
        self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
        # Model parallel
        self.model_parallel = False
        self.device_map = None
        # Initialize weights and apply final processing
        self.post_init()
    def forward(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        Run the backbone, pool the last-token logits, and optionally compute
        a regression / single-label / multi-label classification loss chosen
        by `config.problem_type` (inferred from `num_labels` and label dtype
        when unset).

        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)
        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]
        # Without a pad token we cannot locate each sequence's last real
        # token, so only batch size 1 (take the final position) is supported.
        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # Index of the last non-pad token per sequence.
                sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(
                    logits.device
                )
            else:
                sequence_lengths = -1
                logger.warning(
                    f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                    "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
                )
        # Gather one logit vector per sequence at its last-token position.
        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
        loss = None
        if labels is not None:
            labels = labels.to(pooled_logits.device)
            # Infer the problem type once if the config does not fix it.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (
                    labels.dtype == torch.long or labels.dtype == torch.int
                ):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
class GPTJForQuestionAnswering(GPTJPreTrainedModel):
    """GPT-J with a span-classification head (``qa_outputs``) on top for
    extractive question answering: one start logit and one end logit per
    token position."""
    # The LM head is unused here; its weights (and the recomputed attention
    # bias buffers) may be absent from a checkpoint.
    _keys_to_ignore_on_load_missing = [
        r"h\.\d+\.attn\.masked_bias",
        r"h\.\d+\.attn\.bias",
        r"lm_head.weight",
    ]
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = GPTJModel(config)
        # Maps each hidden state to (start, end) span scores.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        # Model parallel
        self.model_parallel = False
        self.device_map = None
        # Initialize weights and apply final processing
        self.post_init()
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        Run the backbone and predict per-token start/end span logits.

        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Gold index of the span start; used for the token-classification
            loss. Indices outside the sequence are clamped and ignored.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Gold index of the span end, handled the same way. When both are
            given, the loss is the mean of the start and end cross-entropies.
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict
        backbone_out = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden = backbone_out[0]
        span_scores = self.qa_outputs(hidden)
        start_logits, end_logits = span_scores.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        total_loss = None
        if start_positions is not None and end_positions is not None:
            # Multi-GPU gathering can add a trailing dimension; drop it.
            if start_positions.dim() > 1:
                start_positions = start_positions.squeeze(-1)
            if end_positions.dim() > 1:
                end_positions = end_positions.squeeze(-1)
            # Positions beyond the sequence are clamped to an ignored index.
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            head_out = (start_logits, end_logits) + backbone_out[2:]
            return ((total_loss,) + head_out) if total_loss is not None else head_out
        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=backbone_out.hidden_states,
            attentions=backbone_out.attentions,
        )
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,490
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/core/params.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import argparse
from transformers import MODEL_MAPPING, SchedulerType
# Canonical dataset-split names used by the runners.
TRAIN = "train"
EVAL = "validation"
TEST = "test"
# Aggregation/selection modes shared across runner scripts.
ALL = "all"
EACH = "each"
LABEL = "label"
# Config classes registered in transformers' MODEL_MAPPING; each exposes a
# `model_type` string (used below to build the --model_type choices).
MODEL_CLASSES = list(MODEL_MAPPING.keys())
MODEL_TYPES = tuple(c.model_type for c in MODEL_CLASSES)
# Allowed values for the --lr_scheduler flag (mirrors transformers.SchedulerType).
LR_TYPES = [
    "linear",
    "cosine",
    "cosine_with_restarts",
    "polynomial",
    "constant",
    "constant_with_warmup",
]
# Maps dataset name -> (question, context, answer) column names for QA tasks.
question_answering_column_name_mapping = {
    "squad_v2": ("question", "context", "answer"),
}
def parse_params(xs):
    """Build the shared command-line parser, add caller flags, and parse.

    Args:
        xs: iterable of ``(name, kwargs)`` pairs; each pair is forwarded to
            ``ArgumentParser.add_argument`` so callers can extend the flags.

    Returns:
        argparse.Namespace: the parsed arguments.

    Raises:
        ValueError: when neither a dataset name nor any train/eval/test file
            is supplied.
        AssertionError: when a supplied file has an unsupported extension, or
            ``--push_to_hub`` is set without ``--out_dir``.
    """
    x = argparse.ArgumentParser()
    x.add_argument("--answer_column", type=str, default="answers")
    x.add_argument("--block_size", type=int, default=None)
    x.add_argument("--cache_dir", type=str, default=None)
    x.add_argument("--config_name", type=str, default=None)
    x.add_argument("--config_overrides", type=str, default=None)
    x.add_argument("--context_column", type=str, default="context")
    x.add_argument("--cuda", action="store_true")
    x.add_argument("--dataset_config", type=str, default=None)
    x.add_argument("--dataset_name", type=str, default=None)
    x.add_argument("--debug", action="store_true")
    x.add_argument("--do_eval", action="store_true")
    x.add_argument("--do_test", action="store_true")
    x.add_argument("--do_train", action="store_true")
    x.add_argument("--doc_stride", type=int, default=128)
    x.add_argument("--eval_batch_size", type=int, default=8)
    x.add_argument("--eval_file", type=str, default=None)
    x.add_argument("--feature_extractor", type=str, default=None)
    x.add_argument("--grad_accumulation_steps", type=int, default=1)
    x.add_argument("--hub_model_id", type=str)
    x.add_argument("--hub_token", type=str)
    x.add_argument("--ignore_pad_token_for_loss", type=bool, default=True)
    x.add_argument("--label_all_tokens", action="store_true")
    x.add_argument("--label_column", type=str, default="label")
    x.add_argument("--language", type=str, default=None)
    x.add_argument("--line_by_line", type=bool, default=False)
    x.add_argument("--lower_case", type=bool, default=False)
    x.add_argument("--lr_scheduler", type=SchedulerType, default="linear", choices=LR_TYPES)
    x.add_argument("--lr", type=float, default=5e-5)
    x.add_argument("--max_answer_length", type=int, default=30)
    x.add_argument("--max_duration", type=float, default=20.0)
    x.add_argument("--max_eval_samples", type=int, default=None)
    x.add_argument("--max_len", type=int, default=128)
    x.add_argument("--max_seq_length", type=int, default=384)  # 512
    x.add_argument("--max_source_length", type=int, default=1024)
    x.add_argument("--max_span_length", type=int, default=5)
    x.add_argument("--max_target_length", type=int, default=128)
    x.add_argument("--max_test_samples", type=int, default=None)
    x.add_argument("--max_train_samples", type=int, default=None)
    x.add_argument("--max_train_steps", type=int, default=None)
    x.add_argument("--min_duration", type=float, default=0.0)
    x.add_argument("--mlm_probability", type=float, default=0.15)
    x.add_argument("--model_name", type=str, required=True)
    x.add_argument("--model_type", type=str, default=None, choices=MODEL_TYPES)
    x.add_argument("--model_version", type=str, default="main")
    x.add_argument("--n_best_size", type=int, default=20)
    x.add_argument("--no_keep_linebreaks", action="store_true")
    x.add_argument("--null_score_diff_threshold", type=float, default=0.0)
    x.add_argument("--n_beams", type=int, default=None)
    x.add_argument("--num_warmup_steps", type=int, default=0)
    x.add_argument("--num_workers", type=int, default=4)
    x.add_argument("--out_dir", type=str, default=None)
    x.add_argument("--overwrite_cache", type=bool, default=False)
    x.add_argument("--pad_to_max_length", action="store_true")
    x.add_argument("--plm_probability", type=float, default=1 / 6)
    x.add_argument("--push_to_hub", action="store_true")
    x.add_argument("--question_column", type=str, default="question")
    x.add_argument("--return_entity_metrics", action="store_true")
    x.add_argument("--seed", type=int, default=55)
    x.add_argument("--source_lang", type=str, default=None)
    x.add_argument("--source_prefix", type=str, default=None)
    x.add_argument("--split_percent", default=5)
    x.add_argument("--summary_column", type=str, default=None)
    x.add_argument("--target_lang", type=str, default=None)
    x.add_argument("--test_file", type=str, default=None)
    x.add_argument("--test_with_gen", type=bool, default=True)
    x.add_argument("--text_column", type=str, default="text")
    x.add_argument("--tokenizer_name", type=str, default=None)
    x.add_argument("--train_batch_size", type=int, default=8)
    x.add_argument("--train_epochs", type=int, default=3)
    x.add_argument("--train_file", type=str, default=None)
    x.add_argument("--train_language", type=str, default=None)
    x.add_argument("--use_auth_token", type=bool, default=False)
    x.add_argument("--use_fast_tokenizer", type=bool, default=True)
    x.add_argument("--use_slow_tokenizer", action="store_true")
    x.add_argument("--val_max_target_length", type=int, default=None)
    x.add_argument("--version_2_with_negative", type=bool, default=False)
    x.add_argument("--weight_decay", type=float, default=0.0)
    # Caller-supplied extra flags.
    for n, kw in xs:
        x.add_argument(n, **kw)
    y = x.parse_args()
    if (
        y.dataset_name is None
        and y.train_file is None
        and y.eval_file is None
        and y.test_file is None
    ):
        raise ValueError("Need either a dataset name or a train/eval/test file")
    else:
        # BUGFIX: the extension check previously rebound `y` itself to the
        # extension string, clobbering the parsed namespace and crashing on
        # the next attribute access; use a separate variable instead.
        if y.train_file is not None:
            ext = y.train_file.split(".")[-1]
            assert ext in ["csv", "json", "txt"], "`train_file` should be a csv or a json file"
        if y.eval_file is not None:
            ext = y.eval_file.split(".")[-1]
            assert ext in ["csv", "json", "txt"], "`eval_file` should be a csv or a json file"
        if y.test_file is not None:
            ext = y.test_file.split(".")[-1]
            assert ext in ["csv", "json", "txt"], "`test_file` should be a csv or a json file"
        if y.push_to_hub:
            # BUGFIX: the flag is `--out_dir`, so the attribute is `out_dir`
            # (the original checked the nonexistent `output_dir`).
            assert y.out_dir is not None, "Need an `out_dir` for repo with `--push_to_hub`"
    return y
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,491
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/triton/tools/disasm.py
|
# MIT License
# Copyright (c) 2020 Da Yan @ HKUST
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import subprocess
# Patterns for cuobjdump SASS listings:
#   FLINE_RE - instruction line:  /*offs*/  asm ;  /* 0x<16-hex encoding> */
#   SLINE_RE - encoding-only line (the 64-bit control word)
#   FNAME_RE - "Function : <name>" header
#   BRA_RE   - branch instruction with a hex target address
FLINE_RE = re.compile(r'\s*/\*\w{4}\*/\s*([^;]*;)\s*/\* 0x(\w{16}) \*/\s*')
SLINE_RE = re.compile(r'\s*/\* 0x(\w{16}) \*/\s*')
FNAME_RE = re.compile(r'\s*Function : (\w+)\s*')
BRA_RE = re.compile(r'(.*BRA(?:\.U)? )(0x\w+);')
def parseCtrl(sline):
    """Decode the 64-bit control word of an encoding line into the
    'watdb:readb:wrtdb:yield:stall' display string."""
    word = int(SLINE_RE.match(sline).group(1), 16)
    # Bit-field layout: stall[41:45], yield[45], write-barrier[46:49],
    # read-barrier[49:52], wait-barrier mask[52:58].
    stall = (word >> 41) & 0xf
    yield_flag = (word >> 45) & 0x1
    write_barrier = (word >> 46) & 0x7
    read_barrier = (word >> 49) & 0x7
    wait_mask = (word >> 52) & 0x3f
    fields = [
        f'{wait_mask:02d}' if wait_mask else '--',
        str(read_barrier) if read_barrier != 7 else '-',
        str(write_barrier) if write_barrier != 7 else '-',
        '-' if yield_flag else 'Y',
        f'{stall:x}',
    ]
    return ':'.join(fields)
def processSassLines(fline, sline, labels):
    """Turn one (instruction line, encoding line) pair into a (ctrl, asm)
    tuple, registering any branch target address in `labels`."""
    instr = FLINE_RE.match(fline).group(1)
    # Normalize "... ;" to "...;"
    if instr.endswith(" ;"):
        instr = instr[:-2] + ";"
    ctrl = parseCtrl(sline)
    # If this is a BRA, remember its target so it can later get a label.
    branch = BRA_RE.match(instr)
    if branch is not None:
        addr = int(branch.group(2), 16)
        labels.setdefault(addr, len(labels))
    return (ctrl, instr)
def extract(file_path, fun):
    """Disassemble a CUDA binary via ``cuobjdump -sass`` and return a
    cleaned-up listing with branch targets rewritten as LBB# labels.

    Args:
        file_path: path to the cubin/fatbin to disassemble.
        fun: optional mangled function name; when None, all functions are
            disassembled.

    Returns:
        str: the formatted listing for every function found.

    Raises:
        subprocess.CalledProcessError: if cuobjdump fails.
    """
    if fun is None:
        sass_str = subprocess.check_output(["cuobjdump", "-sass", file_path])
    else:
        sass_str = subprocess.check_output(["cuobjdump", "-fun", fun, "-sass", file_path])
    sass_lines = sass_str.splitlines()
    line_idx = 0
    # BUGFIX: `ret` was reinitialized inside the loop (discarding all but the
    # last function) and the header-scan returned None at EOF (the normal
    # termination path), so the accumulated listing was lost.
    ret = ''
    while line_idx < len(sass_lines):
        line = sass_lines[line_idx].decode()
        # Expected cuobjdump format per function:
        #   Function : <function_name>
        #   .headerflags: ...
        #   /*0000*/ asmstr /*0x...*/
        #            /*0x...*/
        # Scan for the next function header.
        while FNAME_RE.match(line) is None:
            line_idx += 1
            if line_idx < len(sass_lines):
                line = sass_lines[line_idx].decode()
            else:
                return ret  # no more functions: return what we have
        fname = FNAME_RE.match(line).group(1)
        ret += f'Function:{fname}\n'
        line_idx += 2  # bypass .headerflags
        line = sass_lines[line_idx].decode()
        # Remapping address -> label_idx.
        labels = {}
        # Buffer (ctrl, asm) pairs so labels are known before printing.
        asm_buffer = []
        while FLINE_RE.match(line) is not None:
            # First line (Offset ASM Encoding).
            fline = sass_lines[line_idx].decode()
            line_idx += 1
            # Second line (Encoding).
            sline = sass_lines[line_idx].decode()
            line_idx += 1
            asm_buffer.append(processSassLines(fline, sline, labels))
            # Peek the next line; assumes cuobjdump output has trailer lines
            # after the last instruction pair — TODO confirm, else this can
            # IndexError at EOF.
            line = sass_lines[line_idx].decode()
        # Print sass; label naming convention: LBB#i.
        for idx, (ctrl, asm) in enumerate(asm_buffer):
            # offset assumes one 16-byte encoding per instruction pair —
            # matches the /*0000*/ offsets seen in cuobjdump output; confirm
            # for other architectures.
            offset = idx * 16
            if offset in labels:
                label_name = f'LBB{labels[offset]}'
                ret += f'{label_name}:\n'
            ret += ctrl + '\t'
            # If this is a BRA, remap its hex offset to the label.
            if BRA_RE.match(asm):
                target = int(BRA_RE.match(asm).group(2), 16)
                target_name = f'LBB{labels[target]}'
                asm = BRA_RE.sub(rf'\1{target_name};', asm)
            ret += asm + '\n'
        ret += '\n'
    return ret
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,492
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/core/mlp.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import torch
from torch import nn
from .. import core as qc
from . import utils as qu
class Llama(qc.Module):
    """Gated (SwiGLU-style) feed-forward block: down(act(gate(x)) * up(x))."""

    hs = qc.Hypers({"act", "d_ff", "d_model", "drop"}, {})

    def __init__(self, d_ff=None, ps={}, hs=[], **kw):
        if d_ff is not None:
            kw.update(d_ff=d_ff)
        super().__init__(ps, [self.hs] + hs, **kw)
        cfg = self.get_cfg(kw)
        self.gate_proj = qc.Linear(cfg.d_model, cfg.d_ff, bias=False, **kw)
        self.down_proj = qc.Linear(cfg.d_ff, cfg.d_model, bias=False, **kw)
        # was nn.Linear: use qc.Linear like the sibling projections so that
        # **kw (qc-specific options) is handled uniformly; nn.Linear raises
        # on unknown keyword arguments
        self.up_proj = qc.Linear(cfg.d_model, cfg.d_ff, bias=False, **kw)
        self.act = qu.activation(cfg.act)

    def forward(self, x):
        # gated feed-forward: elementwise product of activated gate and up path
        return self.down_proj(self.act(self.gate_proj(x)) * self.up_proj(x))
class GPT(qc.Module):
    """GPT-style MLP: Conv1D expansion, activation, Conv1D projection, dropout."""

    hs = qc.Hypers({"act", "d_ff", "d_model", "drop"}, {})

    def __init__(self, d_ff=None, ps={}, hs=[], **kw):
        if d_ff is not None:
            kw.update(d_ff=d_ff)
        super().__init__(ps, [self.hs] + hs, **kw)
        c = self.get_cfg(kw)
        self.lin = qc.Conv1D(c.d_ff, c.d_model, **kw)
        self.proj = qc.Conv1D(c.d_model, c.d_ff, **kw)
        self.act = qu.activation(c.act)
        self.drop = nn.Dropout(c.drop, **kw)

    def forward(self, x):
        # expand -> nonlinearity -> project back -> regularize
        return self.drop(self.proj(self.act(self.lin(x))))
class MLP(qc.Module):
    """Feed-forward block with optional activation, dropout, and residual
    LayerNorm, applied either whole or in chunks along cfg.len_dim when
    cfg.chunk_ff > 0."""

    hs = qc.Hypers(
        {"act", "chunk_ff", "d_ff", "d_model", "drop", "eps"},
        {"len_dim": 1},
    )

    def __init__(self, act=None, drop=None, eps=None, ps={}, hs=[], **kw):
        # fold the explicit overrides into kw before handing off to the base
        for k, v in (("act", act), ("drop", drop), ("eps", eps)):
            if v is not None:
                kw[k] = v
        super().__init__(ps, [self.hs] + hs, **kw)
        cfg = self.get_cfg(kw)
        self.lin = qc.Linear(cfg.d_model, cfg.d_ff, **kw)
        self.act = qu.activation(cfg.act) if cfg.act is not None else None
        self.proj = qc.Linear(cfg.d_ff, cfg.d_model, **kw)
        self.drop = qc.Dropout(cfg.drop, **kw) if cfg.drop is not None else None
        self.norm = qc.LayerNorm(cfg.d_model, cfg.eps, **kw) if cfg.eps is not None else None

    def forward(self, *xs):
        cfg = self.cfg
        csize, d = cfg.chunk_ff, cfg.len_dim
        assert len(xs) > 0
        if csize > 0:
            size = xs[0].shape[d]
            # every input must agree along the chunked dimension
            assert all(x.shape[d] == size for x in xs)
            assert size % csize == 0
            n_pieces = size // csize
            pieces = zip(*(x.chunk(n_pieces, dim=d) for x in xs))
            return torch.cat(tuple(self.chunker(*p) for p in pieces), dim=d)
        return self.chunker(*xs)

    def chunker(self, x):
        # NOTE(review): forward may pass several tensors via *xs, but this
        # default chunker accepts exactly one -- presumably subclasses
        # override it for the multi-input case; confirm against callers.
        y = self.lin(x)
        if self.act:
            y = self.act(y)
        y = self.proj(y)
        if self.drop:
            y = self.drop(y)
        if self.norm:
            y = self.norm(x + y)  # residual connection before the norm
        return y
class Predictor(qc.Module):
    """LM prediction head: linear -> activation -> LayerNorm -> vocab logits."""

    hs = qc.Hypers({"d_model", "d_lin", "eps", "s_vocab"}, {"act": "gelu"})

    def __init__(self, d_lin=None, act=None, ps={}, hs=[], **kw):
        if d_lin is not None:
            kw.update(d_lin=d_lin)
        if act is not None:
            kw.update(act=act)
        super().__init__(ps, [self.hs] + hs, **kw)
        cfg = self.get_cfg(kw)
        d_in = cfg.d_model
        d_mid = cfg.d_lin or d_in  # fall back to d_model when d_lin is unset
        self.lin = qc.Linear(d_in, d_mid, **kw)
        self.act = qu.activation(cfg.act)
        self.norm = qc.LayerNorm(d_mid, cfg.eps, **kw)
        self.proj = qc.Linear(d_mid, cfg.s_vocab, bias=False, **kw)
        # bias kept as a separate attribute and assigned onto proj --
        # presumably so it stays addressable for tying/resizing; confirm
        # against the models that consume this head
        self.bias = nn.Parameter(torch.zeros(cfg.s_vocab))
        self.proj.bias = self.bias

    def forward(self, x):
        return self.proj(self.norm(self.act(self.lin(x))))
class Classifier(qc.Module):
    """Classification head: optional hidden linear + activation, dropout,
    and a final projection to n_labels."""

    hs = qc.Hypers({"d_model", "d_lin", "drop", "drop_proj", "n_labels"}, {"act": "tanh"})

    def __init__(self, d_lin=None, act=None, ps={}, hs=[], **kw):
        if d_lin is not None:
            kw.update(d_lin=d_lin)
        if act is not None:
            kw.update(act=act)
        # pass the declared hypers through to the base, matching every other
        # hyper-declaring module in this file; the original called
        # super().__init__(**kw) and never registered self.hs
        super().__init__(ps, [self.hs] + hs, **kw)
        cfg = self.get_cfg(kw)
        if cfg.d_lin is None:
            # single-layer head: project straight to the label space
            self.proj = qc.Linear(cfg.d_model, cfg.n_labels, **kw)
        else:
            self.lin = qc.Linear(cfg.d_model, cfg.d_lin, **kw)
            self.act = qu.activation(cfg.act)
            self.proj = qc.Linear(cfg.d_lin, cfg.n_labels, **kw)
        # drop_proj takes precedence over the generic drop rate
        p = cfg.drop_proj if cfg.drop_proj is not None else cfg.drop
        self.drop = None if p is None else qc.Dropout(p, **kw)

    def forward(self, x):
        y = x  # [:, 0, :] take <s> token (equiv. to [CLS])
        if self.cfg.d_lin is not None:
            if self.drop:
                y = self.drop(y)
            y = self.lin(y)
            y = self.act(y)
        if self.drop:
            y = self.drop(y)
        y = self.proj(y)
        return y
class Pool(qc.Module):
    """Pools a sequence by passing the first token's state through linear+tanh."""

    hs = qc.Hypers(["d_model"], {})

    def __init__(self, ps={}, hs=[], **kw):
        super().__init__(ps, [self.hs] + hs, **kw)
        cfg = self.get_cfg(kw)
        self.lin = qc.Linear(cfg.d_model, cfg.d_model, **kw)
        self.act = nn.Tanh()

    def forward(self, x):
        first = x[:, 0]  # state of the first (e.g. [CLS]) token
        return self.act(self.lin(first))
class PoolBeg(qc.Module):
    """Projects hidden states to per-position start logits, masking out
    invalid positions with a large negative value."""

    def __init__(self, cfg):
        super().__init__()
        self.proj = qc.Linear(cfg.d_model, 1)

    def forward(self, x, mask=None):
        y = self.proj(x).squeeze(-1)
        if mask is None:
            return y
        # fp16 cannot represent 1e30, so use a large finite fill instead
        big = 65500 if self.get_param_dtype() == torch.float16 else 1e30
        return y * (1 - mask) - big * mask
class PoolEnd(qc.Module):
    """Scores end positions conditioned on the start-position states,
    masking out invalid positions with a large negative value."""

    def __init__(self, cfg):
        super().__init__()
        self.ff = qc.Linear(cfg.d_model * 2, cfg.d_model)
        self.act = nn.Tanh()
        self.norm = qc.LayerNorm(cfg.d_model, cfg.eps)
        self.proj = qc.Linear(cfg.d_model, 1)

    def forward(self, x, x_beg=None, beg_pos=None, mask=None):
        # either the gathered start states or the start positions must be given
        assert x_beg is not None or beg_pos is not None
        if beg_pos is not None:
            slen, hsz = x.shape[-2:]
            sel = beg_pos[:, None, None].expand(-1, -1, hsz)
            # gather each example's start state, then broadcast over the sequence
            x_beg = x.gather(-2, sel).expand(-1, slen, -1)
        y = self.ff(torch.cat([x, x_beg], dim=-1))
        y = self.norm(self.act(y))
        y = self.proj(y).squeeze(-1)
        if mask is None:
            return y
        # fp16 cannot represent 1e30, so use a large finite fill instead
        big = 65500 if self.get_param_dtype() == torch.float16 else 1e30
        return y * (1 - mask) - big * mask
class PoolProj(qc.Module):
    """Combines the start-position state with a summary token state (given by
    idx, or the last position) into a single score per example."""

    def __init__(self, cfg):
        super().__init__()
        self.ff = qc.Linear(cfg.d_model * 2, cfg.d_model)
        self.act = nn.Tanh()
        self.proj = qc.Linear(cfg.d_model, 1, bias=False)

    def forward(self, x, x_beg=None, beg_pos=None, idx=None):
        d = x.shape[-1]
        # either the gathered start state or the start positions must be given
        assert x_beg is not None or beg_pos is not None
        if beg_pos is not None:
            sel = beg_pos[:, None, None].expand(-1, -1, d)
            x_beg = x.gather(-2, sel).squeeze(-2)
        if idx is None:
            summary = x[:, -1, :]  # default: last token's state
        else:
            sel = idx[:, None, None].expand(-1, -1, d)
            summary = x.gather(-2, sel).squeeze(-2)
        y = self.ff(torch.cat([x_beg, summary], dim=-1))
        return self.proj(self.act(y)).squeeze(-1)
class Positionwise(qc.Module):
    """Position-wise feed-forward network with a residual connection and
    either pre- or post-LayerNorm, selected by cfg.pre_norm."""

    hs = qc.Hypers({"d_ff", "d_model", "drop"}, {"eps": 1e-5, "pre_norm": False})

    def __init__(self, ps={}, hs=[], **kw):
        super().__init__(ps, [self.hs] + hs, **kw)
        cfg = self.get_cfg(kw)
        d, f = cfg.d_model, cfg.d_ff
        self.ff = nn.Sequential(
            qc.Linear(d, f, **kw),
            nn.ReLU(inplace=True),
            qc.Dropout(cfg.drop, **kw),
            qc.Linear(f, d, **kw),
            qc.Dropout(cfg.drop, **kw),
        )
        self.norm = qc.LayerNorm(d, **kw)

    def forward(self, x):
        if self.cfg.pre_norm:
            # pre-norm: normalize the input, residual added outside the FFN
            y = self.ff(self.norm(x))
            return x + y
        # post-norm: residual first, then normalize the sum
        y = x + self.ff(x)
        return self.norm(y)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,493
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/base/doc/exporter.py
|
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import markdown
import pathlib as pth
from .base import config
# Shared Python-Markdown configuration used by Exporter's class-level
# converter: code highlighting (Pygments `highlight` CSS class), extras,
# document metadata, table of contents, and fenced code blocks; emits HTML5.
markdown_settings = {
    'extension_configs': {
        'markdown.extensions.codehilite': {
            'css_class': 'highlight'
        },
        'markdown.extensions.extra': {},
        'markdown.extensions.meta': {},
    },
    'extensions': [
        'markdown.extensions.codehilite', 'markdown.extensions.extra',
        'markdown.extensions.meta', 'markdown.extensions.toc',
        'markdown.extensions.fenced_code'
    ],
    'output_format':
        'html5',
}
class Exporter:
    """Renders a document's text as plain text, framed HTML, or blog output.

    HTML conversion runs through a single class-level Markdown instance; the
    surrounding page frame is split out of the `frame.html` web template once
    and cached on the class. Subclasses are expected to provide `hdr`,
    `subject`, and `text`.
    """

    _topic = None
    _subject = None
    html_frame = None
    markdown = markdown.Markdown(**markdown_settings)

    @classmethod
    def frame(cls):
        # Lazily load the template and cache its pieces around the content slot.
        if not cls.html_frame:
            tmpl = pth.Path(config.web_templates + 'frame.html').read_text()
            tmpl = tmpl.replace(r'{% endblock %}', '')
            head, tail = tmpl.split(r'{% block frame_content %}')
            cls.html_frame = (
                head,
                r'<div class="container">',
                link_begin,
                link_title,
                link_end,
                r'</div>',
                tail,
            )
        return cls.html_frame

    def __init__(self, **kw):
        super().__init__(**kw)

    def mboxer(self, ctype=config.HTML, **kw):
        # Header fields first, then subject, then the rendered body.
        yield from self.hdr.mboxer(**kw)
        yield 'subject', self.subject(**kw)
        if ctype == config.PLAIN:
            body = '\n'.join(self.plainer(**kw))
        else:
            body = '\n'.join(self.htmer(self.frame(), **kw))
        yield 'text/' + ctype, body

    def plainer(self, **kw):
        yield self.text(**kw)

    def htmer(self, frame=None, **kw):
        # Optionally wrap the converted markdown in the cached frame pieces.
        if frame:
            yield from frame[:2]
        yield from self.hdr.htmer(None, frame, **kw)
        yield self.markdown.reset().convert(self.text(**kw))
        if frame:
            yield from frame[-3:]

    def blogger(self, **kw):
        yield from self.hdr.blogger(**kw)
        yield self.text(**kw)
        yield from self.hdr.footer(**kw)
# HTML fragments assembled into the frame tuple by Exporter.frame().
# link_begin opens a Bootstrap-style card; format placeholders are a row
# CSS class and a background-color hex value (note the "#{};" — the hash
# is literal CSS, the braces are the format slot).
link_begin = """
<div class="row {}">
<div class="col-10">
<div class="card with-margin" style="background-color: #{};">
<div class="card-block">
"""
# Card heading; placeholders are a label and a title.
link_title = """
<h6 class="text-muted">{} <strong>{}:</strong></h6>
"""
# Closes the four divs opened by link_begin.
link_end = """
</div>
</div>
</div>
</div>
"""
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,494
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/tokens/fast/fnet.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
from shutil import copyfile
from ....tokens.utils import AddedToken
from ....tokens.fast import PreTrainedTokenizerFast
from ....tokens.utils import is_sentencepiece_available
# The slow tokenizer needs sentencepiece; when it is not installed only
# the fast (tokenizers-backed) path is available and FNet stays None.
if is_sentencepiece_available():
    from ..fnet import Tokenizer as FNet
else:
    FNet = None
# Canonical on-disk names for the serialized vocabulary files.
VOCAB_FS = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
# Remote locations of the pretrained vocabulary files, keyed by model id.
VOCAB_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}
# Maximum input lengths (in tokens) for each pretrained checkpoint.
INPUT_CAPS = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}
# Marker sentencepiece uses to denote a word-initial piece.
SPIECE_UNDERLINE = "▁"
class Tokenizer(PreTrainedTokenizerFast):
    """Fast FNet tokenizer backed by a sentencepiece vocabulary.

    Special tokens follow the ``[CLS] A [SEP]`` / ``[CLS] A [SEP] B [SEP]``
    convention; segment ids are 0 for the first sequence and 1 for the
    second.
    """

    vocab_fs = VOCAB_FS
    vocab_map = VOCAB_MAP
    input_caps = INPUT_CAPS
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNet

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk="<unk>",
        sep="[SEP]",
        pad="<pad>",
        cls="[CLS]",
        msk="[MASK]",
        **kw,
    ):
        # The mask token keeps the whitespace before it (lstrip=True) so a
        # masked word round-trips; only wrap when given as a plain string.
        msk = (
            AddedToken(msk, lstrip=True, rstrip=False, normalized=False)
            if isinstance(msk, str)
            else msk
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk=unk,
            sep=sep,
            pad=pad,
            cls=cls,
            msk=msk,
            **kw,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Without the sentencepiece model file, the slow vocabulary cannot
        # be re-serialized later (see save_vocabulary).
        self.can_save_slow_tokenizer = bool(self.vocab_file)

    def build_inputs_with_special_tokens(self, toks_0, toks_1=None):
        """Return ``[CLS] toks_0 [SEP]`` or ``[CLS] toks_0 [SEP] toks_1 [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if toks_1 is None:
            return cls + toks_0 + sep
        return cls + toks_0 + sep + toks_1 + sep

    def create_token_type_ids_from_sequences(self, toks_0, toks_1=None):
        """Return segment ids: 0 over the first sequence (with its special
        tokens), 1 over the second sequence and its trailing [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if toks_1 is None:
            return len(cls + toks_0 + sep) * [0]
        return len(cls + toks_0 + sep) * [0] + len(toks_1 + sep) * [1]

    def save_vocabulary(self, dir, pre=None):
        """Copy the sentencepiece model into `dir` and return its path.

        Raises ValueError when this tokenizer was loaded without a
        sentencepiece vocabulary file — previously this crashed with an
        opaque TypeError from ``os.path.abspath(None)``.
        """
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "This fast tokenizer has no sentencepiece vocabulary file, "
                "so a slow tokenizer vocabulary cannot be saved."
            )
        path = os.path.join(dir, (pre + "-" if pre else "") + VOCAB_FS["vocab_file"])
        # Skip the copy when saving in place (source and target coincide).
        if os.path.abspath(self.vocab_file) != os.path.abspath(path):
            copyfile(self.vocab_file, path)
        return (path,)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,495
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/tokens/roformer.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import collections
import os
import rjieba
from tokenizers import normalizers
from ...tokens.utils import PreTrainedTokenizer
from .bert import BasicTokenizer, WordpieceTokenizer, load_vocab
# On-disk file name used when saving/loading the wordpiece vocabulary.
VOCAB_FS = {"vocab_file": "vocab.txt"}
# Download URLs for pretrained vocabulary files, keyed by model id.
VOCAB_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_base": "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt",
        "junnyu/roformer_small_discriminator": "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt",
        "junnyu/roformer_small_generator": "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt",
    }
}
# Maximum input sequence length per pretrained checkpoint.
INPUT_CAPS = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}
# Default constructor kwargs per pretrained checkpoint (all lowercase text).
PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}
class Tokenizer(PreTrainedTokenizer):
    """RoFormer tokenizer: jieba word segmentation on top of BERT wordpiece.

    Whole words produced by rjieba that are present in the vocabulary are
    emitted as single tokens; everything else falls back to the standard
    basic + wordpiece pipeline.
    """

    vocab_fs = VOCAB_FS
    vocab_map = VOCAB_MAP
    input_caps = INPUT_CAPS
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        do_basic_tokenize=True,
        never_split=None,
        unk="[UNK]",
        sep="[SEP]",
        pad="[PAD]",
        cls="[CLS]",
        msk="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kw,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            do_basic_tokenize=do_basic_tokenize,
            never_split=never_split,
            unk=unk,
            sep=sep,
            pad=pad,
            cls=cls,
            msk=msk,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kw,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained "
                "model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.vocab = load_vocab(vocab_file)
        # Reverse mapping, preserving vocabulary order for id -> token lookups.
        self.ids_to_tokens = collections.OrderedDict(
            [(ids, tok) for tok, ids in self.vocab.items()]
        )
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(
                do_lower_case=do_lower_case,
                never_split=never_split,
                tokenize_chinese_chars=tokenize_chinese_chars,
                strip_accents=strip_accents,
            )
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk=self.unk)
        self.jieba = rjieba

    @property
    def do_lower_case(self):
        return self.basic_tokenizer.do_lower_case

    @property
    def s_vocab(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.vocab)

    def __getstate__(self):
        # The rjieba module handle is not picklable; drop it and restore on load.
        state = self.__dict__.copy()
        state["jieba"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.jieba = rjieba

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text, use_jieba=True):
        """Tokenize `text`, first by jieba whole words, then by wordpiece."""
        split_tokens = []
        if use_jieba:
            for whole_word in self.jieba.cut(text, False):
                if whole_word in self.vocab:
                    split_tokens.append(whole_word)
                else:
                    # Out-of-vocab word: recurse into the BERT-style pipeline.
                    char_list = self._tokenize(whole_word, use_jieba=False)
                    split_tokens.extend(char_list)
        else:
            if self.do_basic_tokenize:
                for token in self.basic_tokenizer.tokenize(
                    text, never_split=self.all_special_tokens
                ):
                    if token in self.basic_tokenizer.never_split:
                        split_tokens.append(token)
                    else:
                        split_tokens += self.wordpiece_tokenizer.tokenize(token)
            else:
                split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk))

    def _convert_id_to_token(self, index):
        return self.ids_to_tokens.get(index, self.unk)

    def convert_tokens_to_string(self, tokens):
        """Join tokens into text, stitching wordpiece continuations back together."""
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def build_inputs_with_special_tokens(self, toks_0, toks_1=None):
        """Return `[CLS] toks_0 [SEP]` or `[CLS] toks_0 [SEP] toks_1 [SEP]`."""
        if toks_1 is None:
            return [self.cls_token_id] + toks_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + toks_0 + sep + toks_1 + sep

    def get_special_tokens_mask(
        self,
        toks_0,
        toks_1=None,
        has_specials=False,
    ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if has_specials:
            return super().get_special_tokens_mask(toks_0=toks_0, toks_1=toks_1, has_specials=True)
        if toks_1 is not None:
            return [1] + ([0] * len(toks_0)) + [1] + ([0] * len(toks_1)) + [1]
        return [1] + ([0] * len(toks_0)) + [1]

    def create_token_type_ids_from_sequences(self, toks_0, toks_1=None):
        """Return segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if toks_1 is None:
            return len(cls + toks_0 + sep) * [0]
        return len(cls + toks_0 + sep) * [0] + len(toks_1 + sep) * [1]

    def save_vocabulary(self, dir, pre=None):
        """Write the vocabulary to `dir` (or to the literal path when `dir` is a file)."""
        # Fix: the original referenced an undefined module name `logger`
        # (never imported in this file), so the non-consecutive-index warning
        # raised NameError instead of warning. Use the stdlib logger.
        import logging

        log = logging.getLogger(__name__)
        index = 0
        if os.path.isdir(dir):
            vocab_file = os.path.join(
                dir,
                (pre + "-" if pre else "") + VOCAB_FS["vocab_file"],
            )
        else:
            vocab_file = (pre + "-" if pre else "") + dir
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    log.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
class JiebaPreTokenizer:
    """Custom pre-tokenizer: segments text with rjieba before wordpiece.

    In-vocabulary jieba words are kept whole; out-of-vocabulary words are
    normalized and split on whitespace, each piece sliced out of the
    original normalized string so offsets are preserved.
    """

    def __init__(self, vocab):
        self.vocab = vocab
        # Normalizer used only for splitting OOV words; it must not lowercase,
        # strip accents, or clean text, so the offsets stay aligned.
        self.normalizers = normalizers.BertNormalizer(
            clean_text=False,
            handle_chinese_chars=True,
            strip_accents=False,
            lowercase=False,
        )
        self.jieba = rjieba

    def jieba_split(self, i, normalized_string):
        pieces = []
        for word, begin, finish in self.jieba.tokenize(str(normalized_string), hmm=False):
            if word in self.vocab:
                pieces.append(normalized_string[begin:finish])
                continue
            # OOV word: walk its whitespace-separated normalized parts,
            # advancing a cursor through the original span.
            cursor = begin
            for part in self.normalizers.normalize_str(word).split():
                if not part:
                    continue
                stop = cursor + len(part)
                pieces.append(normalized_string[cursor:stop])
                cursor = stop
        return pieces

    def pre_tokenize(self, pretok):
        pretok.split(self.jieba_split)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,496
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/convert/megatron.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import numpy as np
import re
import tensorflow as tf
import torch
from argparse import ArgumentParser
from os.path import abspath
from transformers.utils import logging
from ..config.megatron import PreTrained
from ...models.megatron import ForPreTraining
import os
import re
import zipfile
logging.set_verbosity_info()
log = logging.get_logger(__name__)
def load_src_weights(model, config, tf_checkpoint_path):
    """Copy weights from a TensorFlow checkpoint into the PyTorch `model`.

    Walks every variable in the checkpoint, maps its slash-separated TF scope
    path onto the corresponding PyTorch module attributes, and assigns the
    (transposed, for dense kernels) numpy array to the matched parameter.

    `config` is unused but kept for interface compatibility with the other
    converter scripts in this package.

    Returns the mutated `model`; raises ValueError on a shape mismatch.
    """
    tf_path = abspath(tf_checkpoint_path)
    log.info(f"Converting TensorFlow checkpoint from {tf_path}")
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        log.info(f"Loading TF weight {name} with shape {shape}")
        names.append(name)
        arrays.append(tf.train.load_variable(tf_path, name))
    for name, array in zip(names, arrays):
        name = name.split("/")
        # Optimizer slots and bookkeeping variables are not model weights.
        if any(
            n
            in [
                "adam_v",
                "adam_m",
                "AdamWeightDecayOptimizer",
                "AdamWeightDecayOptimizer_1",
                "global_step",
            ]
            for n in name
        ):
            log.info(f"Skipping {'/'.join(name)}")
            continue
        pointer = model
        skipped = False
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    log.info(f"Skipping {'/'.join(name)}")
                    # BUGFIX: the original `continue` only skipped this single
                    # path component, so the remaining components were applied
                    # to a stale `pointer`. Abandon the whole variable instead.
                    skipped = True
                    break
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if skipped:
            continue
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF stores dense kernels transposed relative to torch Linear weights.
            array = np.transpose(array)
        if pointer.shape != array.shape:
            raise ValueError(
                f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
            )
        log.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model
def recursive_print(name, val, spaces=0):
    """Recursively print a checkpoint dict's structure (tensor sizes for leaves).

    `spaces` controls the indentation depth; pass `name=None` at the top
    level to suppress the root header. Returns None (prints to stdout).
    """
    if name is None:
        header = None
    else:
        template = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        header = template.format(name)
    if isinstance(val, dict):
        # Print this level's header, then descend into every child entry.
        if header is not None:
            print(header)
        for child in val:
            recursive_print(child, val[child], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(header, ":", val.size())
    else:
        print(header, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, n_heads, d_hidden):
    """Reorder a fused QKV weight/bias to the [num_splits, n_heads, d_hidden] layout.

    The permutation applied depends on the Megatron checkpoint version;
    unknown versions (< 1.0) leave the tensor untouched. The returned
    tensor always has the same shape as the input.
    """
    original_shape = param.size()
    if checkpoint_version == 1.0:
        # Version 1.0 stores [n_heads * d_hidden * num_splits, :].
        staged = (n_heads, d_hidden, num_splits) + original_shape[1:]
        param = param.view(*staged).transpose(0, 2).transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # Later versions store [n_heads * num_splits * d_hidden, :].
        staged = (n_heads, num_splits, d_hidden) + original_shape[1:]
        param = param.view(*staged).transpose(0, 1).contiguous()
    return param.view(*original_shape)
def convert_megatron_checkpoint(args, input_state_dict, config):
    """Convert a Megatron-BERT state dict into the layout this package's BERT uses.

    `args` is unused but kept for interface compatibility. `config` is
    mutated in place when the checkpoint carries its training args.
    Returns the converted flat state dict.
    """
    output_state_dict = {}
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # Prefer the hyper-parameters recorded in the checkpoint itself.
        config.tokenizer_type = ds_args.tokenizer_type
        config.s_vocab = ds_args.padded_vocab_size
        config.n_pos = ds_args.n_pos
        config.d_hidden = ds_args.d_hidden
        config.n_lays = ds_args.n_lays
        config.n_heads = ds_args.n_heads
        config.d_ff = (
            ds_args.ffn_hidden_size if "ffn_hidden_size" in ds_args else 4 * ds_args.d_hidden
        )
    heads = config.n_heads
    hidden_size_per_head = config.d_hidden // heads
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0
    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]
    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to s_vocab rows.
    word_embeddings = word_embeddings[: config.s_vocab, :]
    # Store the word embeddings.
    output_state_dict["bert.embeddings.word_embeddings.weight"] = word_embeddings
    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    assert pos_embeddings.size(0) == config.n_pos and pos_embeddings.size(1) == config.d_hidden
    # Store the position embeddings.
    output_state_dict["bert.embeddings.position_embeddings.weight"] = pos_embeddings
    # The token-type embeddings.
    tokentype_embeddings = embeddings["tokentype_embeddings"]["weight"]
    # Store the token-type embeddings.
    output_state_dict["bert.embeddings.token_type_embeddings.weight"] = tokentype_embeddings
    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
    # The regex to extract layer names.
    # BUGFIX: raw string — "\d" and "\." are invalid escape sequences in a
    # plain string literal (DeprecationWarning, future SyntaxError).
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attention.output.dense.",
        "mlp.dense_h_to_4h": ".intermediate.dense.",
        "mlp.dense_4h_to_h": ".output.dense.",
    }
    # Keep track of the attention/query/value tensor.
    attention_qkv_weight = None
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)
        # The name of the layer.
        layer_name = f"bert.encoder.layer.{layer_idx}"
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "attention.ln" if op_name.startswith("input") else "ln"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif op_name == "attention.query_key_value" and weight_or_bias == "weight":
            # Make sure the QKV pointer is nil.
            assert attention_qkv_weight is None, ""
            out_val = fix_query_key_value_ordering(
                val, checkpoint_version, 3, heads, hidden_size_per_head
            )
            # Store the tensor as we need the bias as well to interleave QKV and biases.
            attention_qkv_weight = out_val
        # Transpose the bias.
        elif op_name == "attention.query_key_value" and weight_or_bias == "bias":
            # Make sure we read the weight tensor.
            assert attention_qkv_weight is not None, ""
            # Split the QKV matrix into Q, K and V. Megatron stores Q,K,V interleaved.
            q = attention_qkv_weight[0 * config.d_hidden : 1 * config.d_hidden, :]
            k = attention_qkv_weight[1 * config.d_hidden : 2 * config.d_hidden, :]
            v = attention_qkv_weight[2 * config.d_hidden : 3 * config.d_hidden, :]
            out_val = fix_query_key_value_ordering(
                val, checkpoint_version, 3, heads, hidden_size_per_head
            )
            # Split the bias.
            q_bias = out_val[0 * config.d_hidden : 1 * config.d_hidden]
            k_bias = out_val[1 * config.d_hidden : 2 * config.d_hidden]
            v_bias = out_val[2 * config.d_hidden : 3 * config.d_hidden]
            # Store.
            output_state_dict[f"{layer_name}.attention.self.query.weight"] = q
            output_state_dict[f"{layer_name}.attention.self.query.bias"] = q_bias
            output_state_dict[f"{layer_name}.attention.self.key.weight"] = k
            output_state_dict[f"{layer_name}.attention.self.key.bias"] = k_bias
            output_state_dict[f"{layer_name}.attention.self.value.weight"] = v
            output_state_dict[f"{layer_name}.attention.self.value.bias"] = v_bias
            # Clear the stored tensor.
            attention_qkv_weight = None
        # Copy weights and biases as is.
        elif weight_or_bias in ["weight", "bias"]:
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + weight_or_bias] = val
    # The final layernorm.
    output_state_dict["bert.encoder.ln.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["bert.encoder.ln.bias"] = transformer["final_layernorm.bias"]
    # The pooler.
    pooler = lm["pooler"]
    # Store the matrix and the bias.
    output_state_dict["bert.pooler.dense.weight"] = pooler["dense.weight"]
    output_state_dict["bert.pooler.dense.bias"] = pooler["dense.bias"]
    # The LM head from Megatron (for RACE).
    lm_head = model["lm_head"]
    # The transform matrix.
    output_state_dict["cls.predictions.transform.dense.weight"] = lm_head["dense.weight"]
    output_state_dict["cls.predictions.transform.dense.bias"] = lm_head["dense.bias"]
    # The transform LN.
    output_state_dict["cls.predictions.transform.LayerNorm.weight"] = lm_head["layernorm.weight"]
    output_state_dict["cls.predictions.transform.LayerNorm.bias"] = lm_head["layernorm.bias"]
    # For the decoder, we replicate the weights.
    output_state_dict["cls.predictions.decoder.weight"] = word_embeddings
    output_state_dict["cls.predictions.bias"] = lm_head["bias"]
    # The classifier from Megatron (for MLNI).
    binary_head = model["binary_head"]
    # Store the classifier.
    output_state_dict["cls.seq_relationship.weight"] = binary_head["weight"]
    output_state_dict["cls.seq_relationship.bias"] = binary_head["bias"]
    # It should be done!
    return output_state_dict
def main():
    """CLI entry point: load a Megatron checkpoint (possibly zipped), convert, save."""
    parser = ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the ZIP file containing the checkpoint"
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()
    # Outputs go next to the input checkpoint.
    target_dir = os.path.dirname(args.path_to_checkpoint)
    print(f'Extracting PyTorch state dictionary from "{args.path_to_checkpoint}"')
    if args.path_to_checkpoint.endswith(".zip"):
        # Megatron releases ship the state dict inside a fixed path in a ZIP.
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as archive:
            with archive.open("release/mp_rank_00/model_optim_rng.pt") as fd:
                input_state_dict = torch.load(fd, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")
    if args.config_file == "":
        # No config given: infer the vocab size from the LM-head bias.
        config = PreTrained()
        config.s_vocab = input_state_dict["model"]["lm_head"]["bias"].numel()
    else:
        config = PreTrained.from_json_file(args.config_file)
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)
    print("Saving config")
    config.save_pretrained(target_dir)
    output_checkpoint_file = os.path.join(target_dir, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
if __name__ == "__main__":
    main()
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,497
|
quantapix/qnarre
|
refs/heads/main
|
/notebooks/old/src/ragged.py
|
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# !pip install -U tf-nightly-2.0-preview
import tensorflow as tf
import dataset as qd
ks = tf.keras
kl = ks.layers
@tf.function
def adapter(d):
    """Turn one parsed example batch into ((flat_values, row_splits), target).

    Inputs are sparse int features; the 'defs' and 'op' sequences are joined
    with a SEP token per row, and the target is the first 'res' token per row.
    """
    defs = tf.RaggedTensor.from_sparse(d['defs'])
    ops = tf.RaggedTensor.from_sparse(d['op'])
    seps = tf.fill([defs.nrows(), 1], qd.SEP)
    joined = tf.concat([defs, seps, ops], axis=1)
    # Only the first result token per row is the training target.
    target = tf.RaggedTensor.from_sparse(d['res'])[:, :1].to_tensor()
    return (joined.flat_values, joined.row_splits), target
def dset_for(ps):
    """Build the training dataset: batched TFRecords parsed and adapted to ragged inputs."""
    feature_spec = {
        'defs': tf.io.VarLenFeature(tf.int64),
        'op': tf.io.VarLenFeature(tf.int64),
        'res': tf.io.VarLenFeature(tf.int64),
    }
    files = list(qd.files(ps))
    dset = tf.data.TFRecordDataset(files).batch(ps.dim_batch)
    dset = dset.map(lambda r: tf.io.parse_example(r, feature_spec))
    return dset.map(qd.caster).map(adapter)
class Embed(kl.Layer):
    """Token-embedding layer operating on a (flat_values, row_splits) pair."""
    def __init__(self, ps):
        super().__init__(dtype=tf.float32)
        # One learned vector per vocabulary entry.
        self.emb = self.add_weight(name='emb', shape=(ps.dim_vocab, ps.dim_hidden))
    def call(self, x):
        flat, splits = x
        ragged = tf.RaggedTensor.from_row_splits(flat, splits)
        # Look up embeddings on the flat values, preserving the ragged structure.
        return tf.ragged.map_flat_values(tf.nn.embedding_lookup, self.emb, ragged)
class Reflect(kl.Layer):
    """Single-head scaled dot-product self-attention over a ragged batch."""
    def build(self, shape):
        d = shape[-1]
        self.scale = 1 / (d**0.5)
        self.q = self.add_weight(name='q', shape=(d, d))
        self.k = self.add_weight(name='k', shape=(d, d))
        self.v = self.add_weight(name='v', shape=(d, d))
        return super().build(shape)
    def call(self, x):
        def project(w):
            # Apply a (d, d) projection to every flat row vector, keeping raggedness.
            return x.with_values(tf.einsum('ni,ij->nj', x.flat_values, w))
        queries = project(self.q)
        keys = project(self.k)
        values = project(self.v)
        # Densify for the batched attention matmuls (padded positions included).
        att = tf.einsum('bsi,bzi->bsz', queries.to_tensor(), keys.to_tensor())
        att = tf.nn.softmax(att * self.scale)
        out = tf.einsum('bsz,bzi->bsi', att, values.to_tensor())
        # Restore the original per-row lengths.
        return tf.RaggedTensor.from_tensor(out, lengths=x.row_lengths())
class Expand(kl.Layer):
    """Densify a ragged batch and right-pad the sequence axis to len_max_input."""
    def __init__(self, ps):
        super().__init__()
        self.ps = ps
    def call(self, x):
        dense = x.to_tensor()
        # Pad the sequence dimension up to the fixed maximum input length.
        pad = self.ps.len_max_input - tf.shape(dense)[-2]
        return tf.pad(dense, [[0, 0], [0, pad], [0, 0]])
def model_for(ps):
    """Assemble and compile the classifier over (flat_values, row_splits) inputs."""
    ins = [
        ks.Input(shape=(), dtype='int32'),  # flat token values; , ragged=True)
        ks.Input(shape=(), dtype='int64'),  # ragged row splits
    ]
    h = Embed(ps)(ins)
    h = Reflect()(h)
    h = Expand(ps)(h)
    # Flatten the padded (seq, hidden) grid for the dense head.
    h = kl.Reshape((ps.len_max_input * ps.dim_hidden, ))(h)
    h = kl.Dense(ps.dim_dense, activation='relu')(h)
    out = kl.Dense(ps.dim_vocab, name='dbd', activation=None)(h)
    m = ks.Model(inputs=ins, outputs=out)
    m.compile(optimizer=ps.optimizer, loss=ps.loss, metrics=[ps.metric])
    print(m.summary())
    return m
def main_eager(ps, ds, m):
    """Eager training loop: run ps.num_epochs passes of ds through model m.

    `step` applies one gradient update; `epoch` (traced with tf.function)
    iterates the whole dataset, printing running stats every 10 steps.
    """
    def step(x, y):
        # Forward pass and loss must be recorded inside the tape scope.
        with tf.GradientTape() as tape:
            logits = m(x)
            loss = ps.loss(y, logits)
            # Include any layer-registered regularization losses.
            loss += sum(m.losses)
            # NOTE(review): calling the metric also mutates its running state,
            # which `epoch` later reads via ps.metric.result().
            xent = ps.metric(y, logits)
        grads = tape.gradient(loss, m.trainable_variables)
        ps.optimizer.apply_gradients(zip(grads, m.trainable_variables))
        return loss, xent
    @tf.function
    def epoch():
        # s counts steps; loss/xent hold the most recent step's values.
        s, loss, xent = 0, 0.0, 0.0
        for x, y in ds:
            s += 1
            loss, xent = step(x, y)
            if tf.equal(s % 10, 0):
                # Accumulated metric over the epoch so far.
                e = ps.metric.result()
                tf.print('Step:', s, ', loss:', loss, ', xent:', e)
        return loss, xent
    for e in range(ps.num_epochs):
        loss, xent = epoch()
        print(f'Epoch {e} loss:', loss.numpy(), ', xent:', xent.numpy())
# Hyperparameters and training configuration consumed via qd.Params.
params = dict(
    dim_batch=2,  # examples per batch
    dim_dense=150,  # width of the hidden dense layer
    dim_hidden=15,  # embedding / attention dimension
    dim_vocab=len(qd.vocab),  # output classes = vocabulary size
    len_max_input=20,  # fixed padded sequence length
    loss=ks.losses.SparseCategoricalCrossentropy(from_logits=True),
    metric=ks.metrics.SparseCategoricalCrossentropy(from_logits=True),
    num_epochs=10,
    num_shards=2,
    optimizer=ks.optimizers.Adam(),
)
if __name__ == '__main__':
    ps = qd.Params(**params)
    # import advanced_tf.masking as qm
    # qm.main_graph(ps, dset_for(ps), model_for(ps))
    main_eager(ps, dset_for(ps), model_for(ps))
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,498
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/tokens/fast/roformer.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import json
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ....tokens.fast import PreTrainedTokenizerFast
from ..roformer import Tokenizer as RoFormer
from ..utils import JiebaPreTokenizer
# Map from the generic vocab-file key to the on-disk filename the tokenizer expects.
VOCAB_FS = {"vocab_file": "vocab.txt"}
# Download URLs for the WordPiece vocabularies of the published RoFormer checkpoints.
VOCAB_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_base": "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt",
        "junnyu/roformer_small_discriminator": "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt",
        "junnyu/roformer_small_generator": "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt",
    }
}
# Maximum input length (in tokens) supported by each published checkpoint.
INPUT_CAPS = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}
# Per-checkpoint keyword defaults forwarded to the tokenizer constructor.
PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}
class Tokenizer(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer backed by the HuggingFace `tokenizers` library.

    Wraps a WordPiece vocabulary with a custom Jieba-based pre-tokenizer for
    Chinese word segmentation.  Because that custom pre-tokenizer cannot be
    serialized by the Rust backend, pickling (`__getstate__`/`__setstate__`)
    and `save_pretrained` temporarily substitute a plain `BertPreTokenizer`.
    """

    vocab_fs = VOCAB_FS
    vocab_map = VOCAB_MAP
    input_caps = INPUT_CAPS
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk="[UNK]",
        sep="[SEP]",
        pad="[PAD]",
        cls="[CLS]",
        msk="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kw,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk=unk,
            sep=sep,
            pad=pad,
            cls=cls,
            msk=msk,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kw,
        )
        # A tokenizer loaded from a serialized tokenizer.json may carry
        # normalizer settings that differ from the arguments requested here;
        # rebuild the normalizer so lowercasing / accent-stripping match.
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # The custom Jieba pre-tokenizer is a Python object the Rust backend
        # cannot serialize; snapshot the state with a plain BertPreTokenizer.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        # Restore the Jieba pre-tokenizer that __getstate__ swapped out.
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, toks_0, toks_1=None):
        """Return `[CLS] toks_0 [SEP]` or `[CLS] toks_0 [SEP] toks_1 [SEP]`."""
        y = [self.cls_token_id] + toks_0 + [self.sep_token_id]
        # Compare against None (not truthiness): an explicitly passed empty
        # second segment must still contribute its [SEP], so the output length
        # stays in sync with create_token_type_ids_from_sequences.
        if toks_1 is not None:
            y += toks_1 + [self.sep_token_id]
        return y

    def create_token_type_ids_from_sequences(self, toks_0, toks_1=None):
        """Return segment ids: 0 over `[CLS] toks_0 [SEP]`, 1 over `toks_1 [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if toks_1 is None:
            return len(cls + toks_0 + sep) * [0]
        return len(cls + toks_0 + sep) * [0] + len(toks_1 + sep) * [1]

    def save_vocabulary(self, dir, pre=None):
        """Write the WordPiece vocabulary into `dir`; returns the saved paths."""
        return tuple(self._tokenizer.model.save(dir, name=pre))

    def save_pretrained(self, dir, legacy_format=None, pre=None, push_to_hub=False, **kw):
        # The Jieba pre-tokenizer cannot be serialized, so save with a plain
        # BertPreTokenizer — but restore the custom one afterwards so the live
        # tokenizer keeps Chinese word segmentation (previously the plain
        # pre-tokenizer was left in place after saving).
        prev = self.backend_tokenizer.pre_tokenizer
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        try:
            return super().save_pretrained(dir, legacy_format, pre, push_to_hub, **kw)
        finally:
            self.backend_tokenizer.pre_tokenizer = prev
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,499
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/models/flash/bert.py
|
import re
import logging
from functools import partial
from collections.abc import Sequence
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertConfig
from transformers.models.bert.modeling_bert import BaseModelOutputWithPoolingAndCrossAttentions
from transformers.models.bert.modeling_bert import BertForPreTrainingOutput
from einops import rearrange
from flash_attn.modules.mha import MHA
from flash_attn.modules.mlp import Mlp, FusedMLP
from flash_attn.modules.block import Block
from flash_attn.modules.embedding import BertEmbeddings
from flash_attn.bert_padding import unpad_input, pad_input
from flash_attn.bert_padding import index_first_axis, index_first_axis_residual
from flash_attn.utils.pretrained import state_dict_from_pretrained
try:
from flash_attn.ops.fused_dense import FusedDense
except ImportError:
FusedDense = None
try:
from flash_attn.ops.layer_norm import dropout_add_layer_norm, layer_norm
except ImportError:
dropout_add_layer_norm, layer_norm = None, None
try:
from flash_attn.losses.cross_entropy import CrossEntropyLoss
except ImportError:
CrossEntropyLoss = None
logger = logging.getLogger(__name__)
class BertPooler(nn.Module):
    """Pools a sequence by projecting the first-token ([CLS]) state through dense + tanh."""

    def __init__(self, cfg):
        super().__init__()
        use_fused = getattr(cfg, "fused_bias_fc", False)
        if use_fused and FusedDense is None:
            raise ImportError("fused_dense is not installed")
        # Optionally swap in the fused bias+GEMM kernel; interface is identical.
        self.dense = (FusedDense if use_fused else nn.Linear)(cfg.hidden_size, cfg.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, x, pool=True):
        # pool=True reduces (batch, seq, dim) -> (batch, dim) via token 0;
        # pool=False means the caller already selected the rows to pool.
        selected = x[:, 0] if pool else x
        return self.activation(self.dense(selected))
class BertPredictionHeadTransform(nn.Module):
    """Dense -> GELU -> LayerNorm transform applied before the MLM decoder projection."""

    def __init__(self, cfg):
        super().__init__()
        use_fused_fc = getattr(cfg, "fused_bias_fc", False)
        if use_fused_fc and FusedDense is None:
            raise ImportError("fused_dense is not installed")
        self.fused_dropout_add_ln = getattr(cfg, "fused_dropout_add_ln", False)
        if self.fused_dropout_add_ln and layer_norm is None:
            raise ImportError("dropout_add_layer_norm is not installed")
        self.dense = (FusedDense if use_fused_fc else nn.Linear)(cfg.hidden_size, cfg.hidden_size)
        # "gelu_new"/"gelu_fast" correspond to the tanh approximation of GELU.
        self.transform_act_fn = nn.GELU(
            approximate="tanh" if cfg.hidden_act in ["gelu_new", "gelu_fast"] else "none"
        )
        self.layer_norm = nn.LayerNorm(cfg.hidden_size, eps=cfg.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        y = self.transform_act_fn(self.dense(hidden_states))
        if self.fused_dropout_add_ln:
            # Use the fused CUDA layer-norm kernel when enabled.
            return layer_norm(y, self.layer_norm.weight, self.layer_norm.bias, self.layer_norm.eps)
        return self.layer_norm(y)
class BertLMPredictionHead(nn.Module):
    """MLM head: transform hidden states, then project to vocabulary logits.

    The decoder weight is tied to the input word embeddings by the owning model
    (its `tie_weights`); only the per-token output bias belongs to this head.
    """

    def __init__(self, cfg):
        super().__init__()
        use_fused = getattr(cfg, "fused_bias_fc", False)
        if use_fused and FusedDense is None:
            raise ImportError("fused_dense is not installed")
        self.transform = BertPredictionHeadTransform(cfg)
        self.decoder = (FusedDense if use_fused else nn.Linear)(
            cfg.hidden_size, cfg.vocab_size, bias=True
        )

    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states))
class BertPreTrainingHeads(nn.Module):
    """Bundles the MLM prediction head with the 2-way next-sentence classifier."""

    def __init__(self, cfg):
        super().__init__()
        self.predictions = BertLMPredictionHead(cfg)
        self.seq_relationship = nn.Linear(cfg.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        # MLM logits come from per-token states; NSP logits from the pooled [CLS] state.
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
class PreTrained(nn.Module):
    """Base class that validates the config and provides pretrained-checkpoint loading."""

    def __init__(self, cfg, *inputs, **kwargs):
        super().__init__()
        if not isinstance(cfg, BertConfig):
            name = self.__class__.__name__
            raise ValueError(
                "Parameter cfg in `{}(cfg)` should be an instance of class `BertConfig`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(name, name)
            )
        self.cfg = cfg

    @classmethod
    def from_pretrained(cls, model_name, cfg, *inputs, **kwargs):
        """Build the model and load HF-pretrained weights (remapped, non-strict)."""
        model = cls(cfg, *inputs, **kwargs)
        remapped = remap_state_dict(state_dict_from_pretrained(model_name), cfg)
        # strict=False: padded-vocab / head-only keys may legitimately be missing.
        logger.info(model.load_state_dict(remapped, strict=False))
        return model
class ForPreTraining(PreTrained):
    """BERT pre-training model: backbone plus MLM and NSP heads with tied embeddings.

    With `cfg.dense_seq_output` the expensive vocab projection is computed only
    at masked positions (gathered before the head). With `cfg.last_layer_subset`
    (requires dense_seq_output) even the final transformer layer runs only on
    the masked tokens plus the first token — see Model/Encoder.
    """

    def __init__(self, cfg):
        super().__init__(cfg)
        self.dense_seq_output = getattr(cfg, "dense_seq_output", False)
        self.last_layer_subset = getattr(cfg, "last_layer_subset", False)
        if self.last_layer_subset:
            assert self.dense_seq_output, "last_layer_subset requires dense_seq_output"
        use_xentropy = getattr(cfg, "use_xentropy", False)
        if use_xentropy and CrossEntropyLoss is None:
            raise ImportError("xentropy_cuda is not installed")
        # Either the stock loss or flash_attn's fused cross-entropy (in-place backward).
        loss_cls = (
            nn.CrossEntropyLoss
            if not use_xentropy
            else partial(CrossEntropyLoss, inplace_backward=True)
        )
        self.model = Model(cfg)
        self.cls = BertPreTrainingHeads(cfg)
        # NOTE(review): MLM labels appear to use 0 (not the usual -100) as the
        # ignore index, i.e. label 0 marks unmasked positions (matches the
        # `labels > 0` masks below) — confirm against the data pipeline.
        self.mlm_loss = loss_cls(ignore_index=0)
        self.nsp_loss = loss_cls(ignore_index=-1)
        # Initialize weights and apply final processing
        self.apply(partial(_init_weights, initializer_range=cfg.initializer_range))
        self.tie_weights()

    def tie_weights(self):
        # Share the MLM decoder weight with the input word-embedding matrix.
        self.cls.predictions.decoder.weight = self.model.emb.word_embeddings.weight

    def forward(self, x, mask=None, labels=None, next_sentence_label=None, **kw):
        # Positions with label > 0 are the masked tokens; only computed when the
        # last layer is restricted to that subset.
        masked_tokens_mask = labels > 0 if (self.last_layer_subset and labels is not None) else None
        ys = self.model(
            x,
            mask=mask.bool() if mask is not None else None,
            masked_tokens_mask=masked_tokens_mask,
            **kw,
        )
        sequence_output, pooled_output = ys.last_hidden_state, ys.pooler_output
        if self.dense_seq_output and labels is not None:
            # Flat indices of masked positions across the whole batch.
            masked_token_idx = torch.nonzero(labels.flatten() > 0, as_tuple=False).flatten()
            if not self.last_layer_subset:
                # Gather masked-token states before the vocab projection;
                # with last_layer_subset the encoder already returned the subset.
                sequence_output = index_first_axis(
                    rearrange(sequence_output, "b s d -> (b s) d"), masked_token_idx
                )
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        loss = None
        if labels is not None and next_sentence_label is not None:
            if self.dense_seq_output and labels is not None:
                # Scores are already gathered; gather the matching labels.
                masked_lm_loss = self.mlm_loss(
                    prediction_scores, labels.flatten()[masked_token_idx]
                )
            else:
                masked_lm_loss = self.mlm_loss(
                    rearrange(prediction_scores, "... v -> (...) v"),
                    rearrange(labels, "... -> (...)"),
                )
            next_sentence_loss = self.nsp_loss(
                rearrange(seq_relationship_score, "... t -> (...) t"),
                rearrange(next_sentence_label, "... -> (...)"),
            )
            loss = masked_lm_loss.float() + next_sentence_loss.float()
        return BertForPreTrainingOutput(
            loss=loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
        )
class Model(PreTrained):
    """Flash-attention BERT backbone: embeddings -> encoder -> optional pooler.

    Supports restricting the last layer / output to a subset of tokens via
    `masked_tokens_mask` (the masked positions plus the first token, which the
    pooler needs).
    """

    def __init__(self, cfg, add_pool=True):
        super().__init__(cfg)
        # Pad the vocabulary up to a multiple for GEMM efficiency.
        self.pad_vocab_size_multiple = getattr(cfg, "pad_vocab_size_multiple", 1)
        if cfg.vocab_size % self.pad_vocab_size_multiple != 0:
            cfg.vocab_size += self.pad_vocab_size_multiple - (
                cfg.vocab_size % self.pad_vocab_size_multiple
            )
        self.fused_dropout_add_ln = getattr(cfg, "fused_dropout_add_ln", False)
        if self.fused_dropout_add_ln and layer_norm is None:
            raise ImportError("dropout_add_layer_norm is not installed")
        # Only absolute position embeddings and GELU variants are supported here.
        assert cfg.position_embedding_type == "absolute"
        assert cfg.hidden_act in ["gelu", "gelu_new", "gelu_fast"]
        self.emb = BertEmbeddings(
            cfg.hidden_size,
            cfg.vocab_size,
            cfg.max_position_embeddings,
            cfg.type_vocab_size,
            padding_idx=cfg.pad_token_id,
        )
        self.emb_drop = nn.Dropout(cfg.hidden_dropout_prob)
        self.emb_ln = nn.LayerNorm(cfg.hidden_size, eps=cfg.layer_norm_eps)
        self.enc = Encoder(cfg)
        self.pool = BertPooler(cfg) if add_pool else None
        self.apply(partial(_init_weights, initializer_range=cfg.initializer_range))

    def forward(
        self,
        x,
        position_ids=None,
        token_type_ids=None,
        mask=None,
        masked_tokens_mask=None,
        **kw,
    ):
        # FIX: position_ids / token_type_ids were accepted but never forwarded to
        # the embedding layer (previously `self.emb(x, **kw)` dropped them), so
        # callers passing them got default position/segment embeddings silently.
        ys = self.emb(x, position_ids=position_ids, token_type_ids=token_type_ids, **kw)
        if not self.fused_dropout_add_ln:
            ys = self.emb_ln(ys)
        else:
            ys = layer_norm(ys, self.emb_ln.weight, self.emb_ln.bias, self.emb_ln.eps)
        ys = self.emb_drop(ys)
        if masked_tokens_mask is not None:
            batch_size, seqlen = x.shape[:2]
            # Always keep the first token so the pooler has its [CLS] state.
            first_col_mask = torch.zeros(batch_size, seqlen, dtype=torch.bool, device=x.device)
            first_col_mask[:, 0] = True
            subset_mask = masked_tokens_mask | first_col_mask
        else:
            subset_mask = None
        ys = self.enc(ys, key_padding_mask=mask, subset_mask=subset_mask)
        if masked_tokens_mask is None:
            pooled_output = self.pool(ys) if self.pool is not None else None
        else:
            # The encoder returned only the subset rows; split them back into
            # the pooler input (first tokens) and the masked-token states.
            if mask is not None:
                subset_idx = subset_mask[mask]
                pool_input = ys[first_col_mask[mask][subset_idx]]
                ys = ys[masked_tokens_mask[mask][subset_idx]]
            else:
                pool_input = ys[first_col_mask[subset_mask]]
                ys = ys[masked_tokens_mask[subset_mask]]
            pooled_output = self.pool(pool_input, pool=False) if self.pool is not None else None
        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=ys,
            pooler_output=pooled_output,
        )
class Encoder(nn.Module):
    """Stack of transformer blocks with optional unpadding and subset-only last layer.

    With flash attention and a key-padding mask, sequences are unpadded once and
    processed as one packed batch (cu_seqlens bookkeeping) instead of per-row
    masking. With `subset_mask`, only the selected tokens go through the final
    (cross-attention) layer.
    """

    def __init__(self, cfg):
        super().__init__()
        self.use_flash_attn = getattr(cfg, "use_flash_attn", False)
        self.lays = nn.ModuleList([create_block(cfg, layer_idx=i) for i in range(cfg.n_lays)])

    def forward(self, hidden_states, key_padding_mask=None, subset_mask=None):
        if key_padding_mask is None or not self.use_flash_attn:
            # Padded path: pass the mask (if any) straight to each mixer.
            mixer_kwargs = (
                {"key_padding_mask": key_padding_mask} if key_padding_mask is not None else None
            )
            for layer in self.lays:
                hidden_states = layer(hidden_states, mixer_kwargs=mixer_kwargs)
            if subset_mask is not None:
                hidden_states = hidden_states[subset_mask]
        else:
            # Unpadded path: flatten to (total_tokens, d) with cu_seqlens offsets.
            b, seqlen = hidden_states.shape[:2]
            hidden_states, indices, cu_seqlens, max_seqlen_in_batch = unpad_input(
                hidden_states, key_padding_mask
            )
            mixer_kwargs = {"cu_seqlens": cu_seqlens, "max_seqlen": max_seqlen_in_batch}
            if subset_mask is None:
                for layer in self.lays:
                    hidden_states = layer(hidden_states, mixer_kwargs=mixer_kwargs)
                hidden_states = pad_input(hidden_states, indices, b, seqlen)
            else:
                # All but the last layer run on the full (unpadded) tokens.
                for layer in self.lays[:-1]:
                    hidden_states = layer(hidden_states, mixer_kwargs=mixer_kwargs)
                if key_padding_mask is not None:
                    subset_idx = torch.nonzero(
                        subset_mask[key_padding_mask], as_tuple=False
                    ).flatten()
                    subset_seqlens = (subset_mask & key_padding_mask).sum(dim=-1, dtype=torch.int32)
                    # FIX: was `torch.torch.int32` (worked only via the accidental
                    # `torch.torch` self-reference).
                    subset_cu_seqlens = F.pad(
                        torch.cumsum(subset_seqlens, dim=0, dtype=torch.int32), (1, 0)
                    )
                else:
                    subset_idx = torch.nonzero(subset_mask, as_tuple=False).flatten()
                    subset_seqlens = subset_mask.sum(dim=-1, dtype=torch.int32)
                    subset_cu_seqlens = F.pad(
                        torch.cumsum(subset_seqlens, dim=0, dtype=torch.int32), (1, 0)
                    )
                # Last layer: queries are the subset tokens, keys/values all tokens.
                hidden_states_subset, hidden_states = index_first_axis_residual(
                    hidden_states, subset_idx
                )
                mixer_kwargs = {
                    "x_kv": hidden_states,
                    "cu_seqlens": subset_cu_seqlens,
                    "max_seqlen": max_seqlen_in_batch,
                    "cu_seqlens_k": cu_seqlens,
                    "max_seqlen_k": max_seqlen_in_batch,
                }
                hidden_states = self.lays[-1](hidden_states_subset, mixer_kwargs=mixer_kwargs)
        return hidden_states
def create_mixer_cls(cfg, cross_attn=False, return_residual=False):
    """Return a partially-applied MHA constructor configured from cfg."""
    return partial(
        MHA,
        num_heads=cfg.num_attention_heads,
        cross_attn=cross_attn,
        dropout=cfg.attention_probs_dropout_prob,
        causal=False,  # BERT attends bidirectionally
        fused_bias_fc=getattr(cfg, "fused_bias_fc", False),
        use_flash_attn=getattr(cfg, "use_flash_attn", False),
        return_residual=return_residual,
    )
def create_mlp_cls(cfg, layer_idx=None, return_residual=False):
    """Return a partially-applied MLP constructor: plain Mlp or FusedMLP per cfg."""
    inner = cfg.intermediate_size
    if not getattr(cfg, "fused_mlp", False):
        activation = partial(
            F.gelu,
            approximate="tanh" if cfg.hidden_act in ["gelu_new", "gelu_fast"] else "none",
        )
        return partial(
            Mlp,
            hidden_features=inner,
            activation=activation,
            return_residual=return_residual,
        )
    # Fused path: only the tanh-approximate GELU variants are supported.
    assert cfg.hidden_act in ["gelu_new", "gelu_fast"], (
        "fused_mlp only supports approximate gelu"
    )
    if FusedMLP is None:
        raise ImportError("fused_dense is not installed")
    lvl = getattr(cfg, "mlp_checkpoint_lvl", 0)
    if isinstance(lvl, Sequence):
        # Per-layer checkpoint levels require knowing which layer this is.
        assert layer_idx is not None
        lvl = lvl[layer_idx]
    return partial(
        FusedMLP,
        hidden_features=inner,
        checkpoint_lvl=lvl,
        return_residual=return_residual,
    )
def create_block(cfg, layer_idx=None):
    """Build one transformer Block; the final layer uses cross-attention when last_layer_subset is set."""
    is_cross = getattr(cfg, "last_layer_subset", False) and layer_idx == cfg.n_lays - 1
    residual = not is_cross
    return Block(
        cfg.hidden_size,
        create_mixer_cls(cfg, is_cross, return_residual=residual),
        create_mlp_cls(cfg, layer_idx, return_residual=residual),
        norm_cls=partial(nn.LayerNorm, eps=cfg.layer_norm_eps),
        prenorm=False,  # BERT uses post-norm blocks
        resid_dropout1=cfg.hidden_dropout_prob,
        resid_dropout2=cfg.hidden_dropout_prob,
        fused_dropout_add_ln=getattr(cfg, "fused_dropout_add_ln", False),
        return_residual=residual,
    )
def _init_weights(module, initializer_range=0.02):
if isinstance(module, nn.Linear):
nn.init.normal_(module.weight, std=initializer_range)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
nn.init.normal_(module.weight, std=initializer_range)
if module.padding_idx is not None:
nn.init.zeros_(module.weight[module.padding_idx])
def remap_state_dict(state_dict, cfg):
    """Remap a HuggingFace BERT state dict to this flash-attention layout.

    Renames LayerNorm gamma/beta, `layer.` -> `layers.`, per-layer norms to
    norm1/norm2, the MLP to fc1/fc2, fuses Q/K/V into a single Wqkv (or Wq +
    Wkv for the final cross-attention layer when cfg.last_layer_subset), moves
    the decoder bias, and pads embedding/decoder rows when the vocabulary is
    padded to a multiple.

    FIX: regex dots are now escaped (`\\.`); previously every `.` matched any
    character, so unrelated keys could in principle be rewritten.
    """
    # LayerNorm gamma/beta -> weight/bias
    def key_mapping_ln_gamma_beta(key):
        key = re.sub(r"LayerNorm\.gamma$", "LayerNorm.weight", key)
        key = re.sub(r"LayerNorm\.beta$", "LayerNorm.bias", key)
        return key
    state_dict = OrderedDict((key_mapping_ln_gamma_beta(k), v) for k, v in state_dict.items())
    # Layers: HF uses `layer`, this module uses `layers`
    def key_mapping_layers(key):
        return re.sub(r"^bert\.encoder\.layer\.", "bert.encoder.layers.", key)
    state_dict = OrderedDict((key_mapping_layers(k), v) for k, v in state_dict.items())
    # LayerNorm: attention-output LN -> norm1, block-output LN -> norm2
    def key_mapping_ln(key):
        key = re.sub(r"^bert\.embeddings\.LayerNorm\.", "bert.emb_ln.", key)
        key = re.sub(
            r"^bert\.encoder\.layers\.(\d+)\.attention\.output\.LayerNorm\.(weight|bias)",
            r"bert.encoder.layers.\1.norm1.\2",
            key,
        )
        key = re.sub(
            r"^bert\.encoder\.layers\.(\d+)\.output\.LayerNorm\.(weight|bias)",
            r"bert.encoder.layers.\1.norm2.\2",
            key,
        )
        key = re.sub(
            r"^cls\.predictions\.transform\.LayerNorm\.(weight|bias)",
            r"cls.predictions.transform.layer_norm.\1",
            key,
        )
        return key
    state_dict = OrderedDict((key_mapping_ln(k), v) for k, v in state_dict.items())
    # MLP: intermediate -> fc1, output -> fc2
    def key_mapping_mlp(key):
        key = re.sub(
            r"^bert\.encoder\.layers\.(\d+)\.intermediate\.dense\.(weight|bias)",
            r"bert.encoder.layers.\1.mlp.fc1.\2",
            key,
        )
        key = re.sub(
            r"^bert\.encoder\.layers\.(\d+)\.output\.dense\.(weight|bias)",
            r"bert.encoder.layers.\1.mlp.fc2.\2",
            key,
        )
        return key
    state_dict = OrderedDict((key_mapping_mlp(k), v) for k, v in state_dict.items())
    # Attention: fuse separate Q/K/V into Wqkv (or Wq + Wkv on the cross-attn last layer)
    last_layer_subset = getattr(cfg, "last_layer_subset", False)
    for d in range(cfg.n_lays):
        Wq = state_dict.pop(f"bert.encoder.layers.{d}.attention.self.query.weight")
        Wk = state_dict.pop(f"bert.encoder.layers.{d}.attention.self.key.weight")
        Wv = state_dict.pop(f"bert.encoder.layers.{d}.attention.self.value.weight")
        bq = state_dict.pop(f"bert.encoder.layers.{d}.attention.self.query.bias")
        bk = state_dict.pop(f"bert.encoder.layers.{d}.attention.self.key.bias")
        bv = state_dict.pop(f"bert.encoder.layers.{d}.attention.self.value.bias")
        if not (last_layer_subset and d == cfg.n_lays - 1):
            state_dict[f"bert.encoder.layers.{d}.mixer.Wqkv.weight"] = torch.cat(
                [Wq, Wk, Wv], dim=0
            )
            state_dict[f"bert.encoder.layers.{d}.mixer.Wqkv.bias"] = torch.cat([bq, bk, bv], dim=0)
        else:
            state_dict[f"bert.encoder.layers.{d}.mixer.Wq.weight"] = Wq
            state_dict[f"bert.encoder.layers.{d}.mixer.Wkv.weight"] = torch.cat([Wk, Wv], dim=0)
            state_dict[f"bert.encoder.layers.{d}.mixer.Wq.bias"] = bq
            state_dict[f"bert.encoder.layers.{d}.mixer.Wkv.bias"] = torch.cat([bk, bv], dim=0)
    def key_mapping_attn(key):
        return re.sub(
            r"^bert\.encoder\.layers\.(\d+)\.attention\.output\.dense\.(weight|bias)",
            r"bert.encoder.layers.\1.mixer.out_proj.\2",
            key,
        )
    state_dict = OrderedDict((key_mapping_attn(k), v) for k, v in state_dict.items())
    def key_mapping_decoder_bias(key):
        return re.sub(r"^cls\.predictions\.bias", "cls.predictions.decoder.bias", key)
    state_dict = OrderedDict((key_mapping_decoder_bias(k), v) for k, v in state_dict.items())
    # Word embedding: pad rows when the vocab was rounded up to a multiple
    pad_vocab_size_multiple = getattr(cfg, "pad_vocab_size_multiple", 1)
    if pad_vocab_size_multiple > 1:
        word_embeddings = state_dict["bert.embeddings.word_embeddings.weight"]
        state_dict["bert.embeddings.word_embeddings.weight"] = F.pad(
            word_embeddings, (0, 0, 0, cfg.vocab_size - word_embeddings.shape[0])
        )
        decoder_weight = state_dict["cls.predictions.decoder.weight"]
        state_dict["cls.predictions.decoder.weight"] = F.pad(
            decoder_weight, (0, 0, 0, cfg.vocab_size - decoder_weight.shape[0])
        )
        # If the vocab was padded, we want to set the decoder bias for those padded indices to be
        # strongly negative (i.e. the decoder shouldn't predict those indices).
        # TD [2022-05-09]: I don't think it affects the MLPerf training.
        decoder_bias = state_dict["cls.predictions.decoder.bias"]
        state_dict["cls.predictions.decoder.bias"] = F.pad(
            decoder_bias, (0, cfg.vocab_size - decoder_bias.shape[0]), value=-100.0
        )
    return state_dict
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,500
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/core/runner.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import datasets
import logging
import math
import os
import transformers
from accelerate import Accelerator
from datasets import load_dataset
from huggingface_hub import Repository
from pathlib import Path
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from transformers.file_utils import get_full_repo_name
from transformers import (
CONFIG_MAPPING,
AdamW,
AutoConfig,
AutoModel,
AutoTokenizer,
default_data_collator,
get_scheduler,
set_seed,
)
from .params import parse_params, TRAIN, EVAL, TEST, ALL
# Module-level logger; its level is tuned per-process in Runner.__init__
# (INFO on the local main process, ERROR elsewhere).
log = logging.getLogger(__name__)
class Runner:
    """Base training/evaluation harness built on Hugging Face `accelerate`.

    Lazily constructs and caches the dataset, config, tokenizer, model,
    data loaders, optimizer and LR scheduler on first access. Subclasses
    are expected to provide a `train_ds` property (referenced by `loaders`
    and `train`) and may override `eval_epoch` for per-epoch evaluation.
    """

    # Hooks subclasses may override to swap in task-specific auto classes.
    AutoModel = AutoModel
    AutoConfig = AutoConfig
    AutoTokenizer = AutoTokenizer

    def __init__(self, xs=None):
        # `xs=None` instead of a mutable `[]` default avoids the shared
        # mutable-default pitfall; default behavior is unchanged.
        self.params = ps = parse_params([] if xs is None else xs)
        self.mgr = mgr = Accelerator()
        # Lazy caches backing the properties below; initialized here so the
        # `is None` checks cannot raise AttributeError on first access.
        self._dataset = None
        self._cols = None
        self._config = None
        self._tokenizer = None
        self._model = None
        self._eval_ds = None
        self._test_ds = None
        self._loaders = None
        self._optimizer = None
        self._scheduler = None
        logging.basicConfig(
            format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
            datefmt="%m/%d/%Y %H:%M:%S",
            level=logging.INFO,
        )
        log.info(mgr.state)
        # Only the local main process logs at INFO; replicas stay quiet.
        log.setLevel(logging.INFO if mgr.is_local_main_process else logging.ERROR)
        if mgr.is_local_main_process:
            datasets.utils.logging.set_verbosity_warning()
            transformers.utils.logging.set_verbosity_info()
        else:
            datasets.utils.logging.set_verbosity_error()
            transformers.utils.logging.set_verbosity_error()
        if ps.seed is not None:
            set_seed(ps.seed)
        # Repo cloning / output-dir creation happens on the main process only.
        if mgr.is_main_process:
            if ps.push_to_hub:
                if ps.hub_model_id is None:
                    x = get_full_repo_name(Path(ps.out_dir).name, token=ps.hub_token)
                else:
                    x = ps.hub_model_id
                self.repo = Repository(ps.out_dir, clone_from=x)
            elif ps.out_dir is not None:
                os.makedirs(ps.out_dir, exist_ok=True)
        mgr.wait_for_everyone()
        # NOTE(review): HF tokenizers expect the strategy string "max_length";
        # "max_len" looks like a typo — confirm against where self.padding
        # is consumed (not visible in this file).
        self.padding = "max_len" if ps.pad_to_max_length else False

    @property
    def dataset(self):
        """Load (once) the raw dataset, either by hub name or from local files."""
        if self._dataset is None:
            ps = self.params
            if ps.dataset_name is not None:
                y = load_dataset(ps.dataset_name, ps.dataset_config)
            else:
                x, xs = None, {}
                if ps.test_file is not None:
                    xs[TEST] = x = ps.test_file
                if ps.eval_file is not None:
                    xs[EVAL] = x = ps.eval_file
                if ps.train_file is not None:
                    xs[TRAIN] = x = ps.train_file
                # File extension (json/csv/...) selects the loader script.
                y = load_dataset(x.split(".")[-1], data_files=xs)  # field="data")
            if ps.debug:
                # Shrink every split to 100 rows for quick iteration.
                for k in y.keys():
                    y[k] = y[k].select(range(100))
            self._dataset = y
        return self._dataset

    @property
    def cols(self):
        """Column names of the training split, keyed under ALL."""
        if self._cols is None:
            cs = self.dataset[TRAIN].column_names
            self._cols = {ALL: cs}
        return self._cols

    @property
    def config(self):
        """Model config: pretrained when a name is given, otherwise fresh."""
        if self._config is None:
            ps = self.params
            x = ps.config_name if ps.config_name else ps.model_name
            if x:
                y = self.AutoConfig.from_pretrained(x)
            else:
                y = CONFIG_MAPPING[ps.model_type]()
                log.warning("Creating new config")
            self._config = y
        return self._config

    @property
    def tokenizer(self):
        """Pretrained tokenizer; training one from scratch is unsupported."""
        if self._tokenizer is None:
            ps = self.params
            x = ps.tokenizer_name if ps.tokenizer_name else ps.model_name
            if not x:
                raise ValueError("Tokenizer from scratch is not supported")
            y = self.AutoTokenizer.from_pretrained(x, use_fast=not ps.use_slow_tokenizer)
            self._tokenizer = y
        return self._tokenizer

    @property
    def model(self):
        """Model: pretrained weights when a name is given, else from config."""
        if self._model is None:
            ps = self.params
            if ps.model_name:
                y = self.AutoModel.from_pretrained(
                    ps.model_name,
                    from_tf=bool(".ckpt" in ps.model_name),
                    config=self.config,
                )
            else:
                log.info("Training new model")
                y = self.AutoModel.from_config(self.config)
            self._model = y
        return self._model

    @property
    def eval_ds(self):
        """Evaluation split, optionally truncated to max_eval_samples."""
        if self._eval_ds is None:
            ps, ds = self.params, self.dataset
            y = ds[EVAL]
            if ps.max_eval_samples is not None:
                y = y.select(range(ps.max_eval_samples))
            self._eval_ds = y
        return self._eval_ds

    @property
    def test_ds(self):
        """Test split, optionally truncated to max_test_samples."""
        if self._test_ds is None:
            ps, ds = self.params, self.dataset
            y = ds[TEST]
            if ps.max_test_samples is not None:
                y = y.select(range(ps.max_test_samples))
            self._test_ds = y
        return self._test_ds

    @property
    def loaders(self):
        """Train/eval DataLoaders; `train_ds` must be provided by a subclass."""
        if self._loaders is None:
            ps = self.params
            c = default_data_collator
            t = DataLoader(
                self.train_ds, shuffle=True, collate_fn=c, batch_size=ps.train_batch_size
            )
            e = DataLoader(self.eval_ds, collate_fn=c, batch_size=ps.eval_batch_size)
            self._loaders = {TRAIN: t, EVAL: e}
        return self._loaders

    @property
    def optimizer(self):
        """AdamW over two param groups: decayed vs. no-decay (bias/LayerNorm)."""
        if self._optimizer is None:
            ps, m = self.params, self.model
            ds = ["bias", "LayerNorm.weight"]
            xs = [
                {
                    "params": [p for n, p in m.named_parameters() if not any(d in n for d in ds)],
                    "weight_decay": ps.weight_decay,
                },
                {
                    "params": [p for n, p in m.named_parameters() if any(d in n for d in ds)],
                    "weight_decay": 0.0,
                },
            ]
            self._optimizer = AdamW(xs, lr=ps.lr)
        return self._optimizer

    @property
    def scheduler(self):
        """LR scheduler attached to `self.optimizer`."""
        if self._scheduler is None:
            ps = self.params
            self._scheduler = get_scheduler(
                name=ps.lr_scheduler_type,
                optimizer=self.optimizer,
                num_warmup_steps=ps.num_warmup_steps,
                num_training_steps=ps.max_train_steps,
            )
        # BUG FIX: previously returned self._optimizer, so callers received
        # the optimizer instead of the scheduler they asked for.
        return self._scheduler

    def prepare(self):
        """Wrap model, optimizer and loaders for (possibly distributed) runs."""
        m, mgr, ls = self.model, self.mgr, self.loaders
        m.to(mgr.device)
        t, e = ls[TRAIN], ls[EVAL]
        self._model, self._optimizer, t, e = mgr.prepare(m, self.optimizer, t, e)
        self._loaders = {TRAIN: t, EVAL: e}

    def train(self):
        """Run the full training loop with gradient accumulation.

        Derives max_train_steps / num_train_epochs from each other when only
        one is set, steps optimizer+scheduler every grad_accumulation_steps
        batches, calls `eval_epoch` after each epoch, and optionally pushes
        intermediate checkpoints to the hub.
        """
        ps, mgr, src = self.params, self.mgr, self.loaders[TRAIN]
        # Optimizer steps per epoch (each step spans several micro-batches).
        x = math.ceil(len(src) / ps.grad_accumulation_steps)
        if ps.max_train_steps is None:
            ps.max_train_steps = ps.num_train_epochs * x
        else:
            ps.num_train_epochs = math.ceil(ps.max_train_steps / x)
        m, o, s = self.model, self.optimizer, self.scheduler
        b = ps.train_batch_size * mgr.num_processes * ps.grad_accumulation_steps
        log.info("*** Training ***")
        log.info(f"  Num samples = {len(self.train_ds)}")
        log.info(f"  Num epochs = {ps.num_train_epochs}")
        log.info(f"  Batch size per device = {ps.train_batch_size}")
        log.info(f"  Batch size (w. parallel, distributed & accumulation) = {b}")
        log.info(f"  Grad accumulation steps = {ps.grad_accumulation_steps}")
        log.info(f"  Train steps = {ps.max_train_steps}")
        n = 0
        bar = tqdm(range(ps.max_train_steps), disable=not mgr.is_local_main_process)
        for e in range(ps.num_train_epochs):
            m.train()
            for i, xs in enumerate(src):
                ys = m(**xs)
                # Scale the loss so accumulated gradients average correctly.
                mgr.backward(ys.loss / ps.grad_accumulation_steps)
                if i % ps.grad_accumulation_steps == 0 or i == len(src) - 1:
                    o.step()
                    s.step()
                    o.zero_grad()
                    bar.update(1)
                    n += 1
                if n >= ps.max_train_steps:
                    # NOTE: only exits the inner loop; remaining epochs still
                    # run eval_epoch/push (matches the original control flow).
                    break
            self.eval_epoch(e)
            if ps.push_to_hub and e < ps.num_train_epochs - 1:
                mgr.wait_for_everyone()
                mgr.unwrap_model(m).save_pretrained(ps.out_dir, save_function=mgr.save)
                if mgr.is_main_process:
                    self.tokenizer.save_pretrained(ps.out_dir)
                    self.repo.push_to_hub(commit_message=f"Training... epoch {e}", blocking=False)

    def eval_epoch(self, _):
        """Per-epoch evaluation hook; no-op here, meant to be overridden."""
        pass

    def save(self):
        """Persist model and tokenizer to out_dir; optionally push to hub."""
        ps, mgr = self.params, self.mgr
        if ps.out_dir is not None:
            mgr.wait_for_everyone()
            mgr.unwrap_model(self.model).save_pretrained(ps.out_dir, save_function=mgr.save)
            if mgr.is_main_process:
                self.tokenizer.save_pretrained(ps.out_dir)
                if ps.push_to_hub:
                    self.repo.push_to_hub(commit_message="End of training")
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,501
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/run/seq2seq.py
|
# Copyright 2021 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# fine-tune seq2seq models for question answering
import logging
import random
from datasets import load_metric
from torch.utils.data import DataLoader
from transformers import AutoModelForSeq2SeqLM, DataCollatorForSeq2Seq
from transformers.trainer_utils import EvalPrediction
from .params import TRAIN, EVAL, TEST, ALL, EACH
from .runner import Runner as Base
from .qa import Runner as QA
# Module-level logger for this fine-tuning script.
log = logging.getLogger(__name__)
# Known datasets mapped to their (question, context, answer) column names;
# used by Runner.cols as a fallback when columns are not given on the CLI.
NAMES = {
    "squad_v2": ("question", "context", "answer"),
}
class Runner(Base):
    """Fine-tunes a seq2seq language model for extractive question answering.

    Questions and contexts are serialized into "question: ... context: ..."
    prompts and the first gold answer text becomes the decoder target. All
    heavyweight objects (config, tokenizer, model, datasets, loaders, metric)
    are lazily built and cached behind `_field`-backed properties, following
    the Base runner convention.
    """

    AutoModel = AutoModelForSeq2SeqLM

    @property
    def cols(self):
        """Resolve dataset column names.

        Returns a dict with ALL column names of the active split and the EACH
        triple [question, context, answer], each taken from CLI params, the
        NAMES registry, or positional defaults — in that order of precedence.

        Raises ValueError when no split is selected or a chosen column is
        absent from the dataset.
        """
        if self._cols is None:
            ps = self.params
            if ps.do_train:
                cs = self.dataset[TRAIN].column_names
            elif ps.do_eval:
                cs = self.dataset[EVAL].column_names
            elif ps.do_test:
                cs = self.dataset[TEST].column_names
            else:
                raise ValueError("There is nothing to do")
            ns = NAMES.get(ps.dataset_name, None)
            if ps.question_column is None:
                q = ns[0] if ns is not None else cs[0]
            else:
                q = ps.question_column
            if q not in cs:
                raise ValueError(f"--question_column' needs to be in: {', '.join(cs)}")
            if ps.context_column is None:
                c = ns[1] if ns is not None else cs[1]
            else:
                c = ps.context_column
            if c not in cs:
                raise ValueError(f"--context_column' needs to be in: {', '.join(cs)}")
            if ps.answer_column is None:
                a = ns[2] if ns is not None else cs[2]
            else:
                a = ps.answer_column
            if a not in cs:
                raise ValueError(f"--answer_column' needs to be in: {', '.join(cs)}")
            self._cols = {ALL: cs, EACH: [q, c, a]}
        return self._cols

    @property
    def config(self):
        """Lazily load the model config from `config_name` (or `model_name`).

        Raises ValueError when neither name is set — configs are never built
        from scratch here.
        """
        if self._config is None:
            ps = self.params
            x = ps.config_name if ps.config_name else ps.model_name
            if not x:
                raise ValueError("Config from scratch is not supported")
            # The original re-tested `if x:` right after raising — always true.
            self._config = self.AutoConfig.from_pretrained(
                x,
                cache_dir=ps.cache_dir,
                revision=ps.model_version,
                use_auth_token=True if ps.use_auth_token else None,
            )
        return self._config

    @property
    def tokenizer(self):
        """Lazily load a fast tokenizer and clamp `max_seq_length` to the
        tokenizer's model maximum (stored on `self.max_seq_length`).
        """
        if self._tokenizer is None:
            ps = self.params
            x = ps.tokenizer_name if ps.tokenizer_name else ps.model_name
            if not x:
                raise ValueError("Tokenizer from scratch is not supported")
            y = self.AutoTokenizer.from_pretrained(
                x,
                cache_dir=ps.cache_dir,
                use_fast=True,
                revision=ps.model_version,
                use_auth_token=True if ps.use_auth_token else None,
            )
            self._tokenizer = y
            if ps.max_seq_length > y.model_max_length:
                log.warning(f"Using max_seq_length={y.model_max_length}")
            self.max_seq_length = min(ps.max_seq_length, y.model_max_length)
        return self._tokenizer

    @property
    def model(self):
        """Lazily load (or construct from config) the seq2seq model.

        Raises ValueError when the config lacks `dec_START` (the decoder start
        token id required for generation). Warns when label smoothing is on
        but the model cannot prepare decoder inputs from labels.
        """
        if self._model is None:
            ps = self.params
            if ps.model_name:
                y = self.AutoModel.from_pretrained(
                    ps.model_name,
                    from_tf=bool(".ckpt" in ps.model_name),
                    config=self.config,
                    cache_dir=ps.cache_dir,
                    revision=ps.model_version,
                    use_auth_token=True if ps.use_auth_token else None,
                )
            else:
                log.info("Training new model")
                y = self.AutoModel.from_config(self.config)
            self._model = y
            if y.config.dec_START is None:
                raise ValueError("Needs `config.dec_START`")
            if ps.label_smoothing_factor > 0 and not hasattr(
                y, "prepare_decoder_input_ids_from_labels"
            ):
                log.warning("Needs `prepare_decoder_input_ids_from_labels` method for model")
        return self._model

    @property
    def train_ds(self):
        """Lazily tokenize the training split (optionally truncated to
        `max_train_samples`) and log a few random samples.
        """
        if self._train_ds is None:
            ps, mgr, ds = self.params, self.mgr, self.dataset
            y = ds[TRAIN]
            if ps.max_train_samples is not None:
                y = y.select(range(ps.max_train_samples))
            with mgr.main_process_first():
                y = y.map(
                    self.prep_for_train,
                    batched=True,
                    num_proc=ps.num_workers,
                    remove_columns=self.cols[ALL],
                    load_from_cache_file=not ps.overwrite_cache,
                    desc="Running tokenizer on train dataset",
                )
            # Robustness: don't crash when the (possibly truncated) set has
            # fewer than 3 samples.
            for i in random.sample(range(len(y)), min(3, len(y))):
                log.info(f"Sample {i} of the training set: {y[i]}")
            self._train_ds = y
        return self._train_ds

    def prep_for_train(self, xs):
        """Tokenize a batch for training: encode prompts, encode answers as
        labels, and mask pad tokens with -100 when requested so they are
        ignored by the loss.
        """
        ps, t = self.params, self.tokenizer
        ins, ans = self.prep_batch(xs)
        y = t(ins, max_len=self.max_seq_length, padding=self.padding, truncation=True)
        with t.as_target_tokenizer():
            ls = t(ans, max_len=ps.max_answer_length, padding=self.padding, truncation=True)
        if self.padding == "max_len" and ps.ignore_pad_token_for_loss:
            ls["input_ids"] = [[(x if x != t.PAD else -100) for x in l] for l in ls["input_ids"]]
        y["labels"] = ls["input_ids"]
        return y

    def prep_batch(self, xs):
        """Build "question: ... context: ..." prompts and target answer strings.

        Examples with no answer text get an empty-string target.
        """
        q, c, a = self.cols[EACH]
        # Loop variables renamed so they no longer shadow the column names
        # used in the zip() arguments.
        ins = [
            " ".join(["question:", ques.lstrip(), "context:", ctx.lstrip()])
            for ques, ctx in zip(xs[q], xs[c])
        ]
        ans = [x["text"][0] if len(x["text"]) > 0 else "" for x in xs[a]]
        return ins, ans

    @property
    def eval_ds(self):
        """Lazily tokenize the eval split with `prep_for_eval`."""
        if self._eval_ds is None:
            ps, mgr = self.params, self.mgr
            y = super().eval_ds
            with mgr.main_process_first():
                y = y.map(
                    self.prep_for_eval,
                    batched=True,
                    num_proc=ps.num_workers,
                    remove_columns=self.cols[ALL],
                    load_from_cache_file=not ps.overwrite_cache,
                    desc="Running tokenizer on eval dataset",
                )
            self._eval_ds = y
        return self._eval_ds

    def prep_for_eval(self, xs):
        """Tokenize a batch for evaluation.

        Long inputs may overflow into several features; each feature records
        the id of its source example (`example_id`) so predictions can be
        mapped back in `post_proc`.
        """
        ps, t = self.params, self.tokenizer
        ins, ans = self.prep_batch(xs)
        y = t(
            ins,
            max_len=self.max_seq_length,
            padding=self.padding,
            truncation=True,
            return_overflowing_tokens=True,
            return_offsets_mapping=True,
        )
        with t.as_target_tokenizer():
            ls = t(ans, max_len=ps.max_answer_length, padding=self.padding, truncation=True)
        # Renamed from `map`, which shadowed the builtin.
        sample_map = y.pop("overflow_to_sample_mapping")
        y["example_id"] = []
        for i in range(len(y["input_ids"])):
            y["example_id"].append(xs["id"][sample_map[i]])
        if self.padding == "max_len" and ps.ignore_pad_token_for_loss:
            ls["input_ids"] = [[(x if x != t.PAD else -100) for x in l] for l in ls["input_ids"]]
        y["labels"] = ls["input_ids"]
        return y

    @property
    def test_ds(self):
        """Lazily tokenize the test split with `prep_for_eval`."""
        if self._test_ds is None:
            ps, mgr = self.params, self.mgr
            y = super().test_ds
            with mgr.main_process_first():
                y = y.map(
                    self.prep_for_eval,
                    batched=True,
                    num_proc=ps.num_workers,
                    remove_columns=self.cols[ALL],
                    load_from_cache_file=not ps.overwrite_cache,
                    desc="Running tokenizer on test dataset",
                )
            self._test_ds = y
        return self._test_ds

    @property
    def metric(self):
        """Lazily load the SQuAD metric (v2 when negative answers are enabled)."""
        if self._metric is None:
            # Fix: the original assigned `self.metric`, which hits this
            # read-only property and raises AttributeError, and read
            # `self.ps`, which is `self.params` everywhere else in the class.
            self._metric = load_metric(
                "squad_v2" if self.params.version_2_with_negative else "squad"
            )
        return self._metric

    def compute_metrics(self, p):
        """Score an EvalPrediction with the configured SQuAD metric."""
        return self.metric.compute(predictions=p.predictions, references=p.label_ids)

    @property
    def loaders(self):
        """Build train/eval DataLoaders with a seq2seq collator that pads
        dynamically (to a multiple of 8 under fp16) and pads labels with -100
        when pad tokens are ignored by the loss.
        """
        if self._loaders is None:
            ps, tok = self.params, self.tokenizer
            c = DataCollatorForSeq2Seq(
                tok,
                model=self.model,
                label_pad_token_id=-100 if ps.ignore_pad_token_for_loss else tok.PAD,
                pad_to_multiple_of=8 if ps.fp16 else None,
            )
            # Distinct names: the original reused `t` for both the tokenizer
            # and the train loader.
            train = DataLoader(
                self.train_ds, shuffle=True, collate_fn=c, batch_size=ps.train_batch_size
            )
            ev = DataLoader(self.eval_ds, collate_fn=c, batch_size=ps.eval_batch_size)
            self._loaders = {TRAIN: train, EVAL: ev}
        return self._loaders

    def post_proc(self, xs, features, outs, stage="eval"):
        """Decode generated token ids and pair each example with its reference.

        Overflowed features are mapped back to their source example through
        `example_id`; one decoded prediction is kept per example. Returns an
        EvalPrediction suitable for `compute_metrics`.
        """
        ps = self.params
        preds = outs.predictions
        if isinstance(preds, tuple):
            preds = preds[0]
        preds = self.tokenizer.batch_decode(preds, skip_special_tokens=True)
        # Renamed from `map`, which shadowed the builtin.
        id_to_idx = {k: i for i, k in enumerate(xs["id"])}
        feature_per_example = {id_to_idx[x["example_id"]]: i for i, x in enumerate(features)}
        ys = {}
        for i, x in enumerate(xs):
            ys[x["id"]] = preds[feature_per_example[i]]
        if ps.version_2_with_negative:
            ys = [
                {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in ys.items()
            ]
        else:
            ys = [{"id": k, "prediction_text": v} for k, v in ys.items()]
        ls = [{"id": x["id"], "answers": x[self.cols[EACH][2]]} for x in xs]
        return EvalPrediction(predictions=ys, label_ids=ls)
def main():
    """Drive the full fine-tuning pipeline end to end."""
    r = Runner()
    # Touch the lazy properties in dependency order so each builds exactly once.
    r.cols
    r.dataset
    r.config
    r.tokenizer
    r.model
    r.model.resize_token_embeddings(len(r.tokenizer))
    r.loaders
    r.prepare()
    r.train()
    r.eval()
    r.test()
    r.save()
if __name__ == "__main__":
    main()
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,502
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/tokens/luke.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import itertools
import json
import os
import numpy as np
from .roberta import Tokenizer as Roberta
from ...tokens.utils import (
AddedToken,
BatchEncoding,
PaddingStrategy,
TruncationStrategy,
_is_tensorflow,
_is_torch,
to_py_obj,
)
from ...tokens.utils import is_tf_available, is_torch_available
# File names expected inside a pretrained LUKE checkpoint directory.
VOCAB_FS = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "entity_vocab_file": "entity_vocab.json",
}
# Download URLs for the published studio-ousia LUKE checkpoints.
VOCAB_MAP = {
    "vocab_file": {
        "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/vocab.json",
        "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/vocab.json",
    },
    "merges_file": {
        "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/merges.txt",
        "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/merges.txt",
    },
    "entity_vocab_file": {
        "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/entity_vocab.json",
        "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/entity_vocab.json",
    },
}
# Maximum input length (in tokens) per checkpoint.
INPUT_CAPS = {
    "studio-ousia/luke-base": 512,
    "studio-ousia/luke-large": 512,
}
class Tokenizer(Roberta):
    """RoBERTa BPE tokenizer extended with a separate entity vocabulary (LUKE)."""

    vocab_fs = VOCAB_FS  # expected checkpoint file names
    vocab_map = VOCAB_MAP  # checkpoint download URLs
    input_caps = INPUT_CAPS  # per-checkpoint max input length
def __init__(
self,
vocab_file,
merges_file,
entity_vocab_file,
task=None,
max_entity_length=32,
max_mention_length=30,
entity_token_1="<ent>",
entity_token_2="<ent2>",
entity_unk_token="[UNK]",
entity_pad_token="[PAD]",
entity_mask_token="[MASK]",
entity_mask2_token="[MASK2]",
**kw,
):
entity_token_1 = (
AddedToken(entity_token_1, lstrip=False, rstrip=False)
if isinstance(entity_token_1, str)
else entity_token_1
)
entity_token_2 = (
AddedToken(entity_token_2, lstrip=False, rstrip=False)
if isinstance(entity_token_2, str)
else entity_token_2
)
kw["additional_special_tokens"] = kw.get("additional_special_tokens", [])
kw["additional_special_tokens"] += [entity_token_1, entity_token_2]
super().__init__(
vocab_file=vocab_file,
merges_file=merges_file,
task=task,
max_entity_length=32,
max_mention_length=30,
entity_token_1="<ent>",
entity_token_2="<ent2>",
entity_unk_token=entity_unk_token,
entity_pad_token=entity_pad_token,
entity_mask_token=entity_mask_token,
entity_mask2_token=entity_mask2_token,
**kw,
)
with open(entity_vocab_file, encoding="utf-8") as entity_vocab_handle:
self.entity_vocab = json.load(entity_vocab_handle)
for entity_special_token in [
entity_unk_token,
entity_pad_token,
entity_mask_token,
entity_mask2_token,
]:
if entity_special_token not in self.entity_vocab:
raise ValueError(
f"Specified entity special token ``{entity_special_token}`` is not found in entity_vocab. "
f"Probably an incorrect entity vocab file is loaded: {entity_vocab_file}."
)
self.entity_unk_token_id = self.entity_vocab[entity_unk_token]
self.entity_pad_token_id = self.entity_vocab[entity_pad_token]
self.entity_mask_token_id = self.entity_vocab[entity_mask_token]
self.entity_mask2_token_id = self.entity_vocab[entity_mask2_token]
self.task = task
if task is None or task == "entity_span_classification":
self.max_entity_length = max_entity_length
elif task == "entity_classification":
self.max_entity_length = 1
elif task == "entity_pair_classification":
self.max_entity_length = 2
else:
raise ValueError(
f"Task {task} not supported. Select task from ['entity_classification', 'entity_pair_classification', 'entity_span_classification'] only."
)
self.max_mention_length = max_mention_length
    def __call__(
        self,
        text,
        text_pair=None,
        entity_spans=None,
        entity_spans_pair=None,
        entities=None,
        entities_pair=None,
        add_special_tokens=True,
        padding=False,
        truncation=False,
        max_length=None,
        max_entity_length=None,
        stride=0,
        is_split_into_words=False,
        pad_to_multiple_of=None,
        return_tensors=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        **kw,
    ):
        """Tokenize text (optionally with entity spans) and return a BatchEncoding.

        Validates the text/text_pair argument shapes, then dispatches to
        ``batch_encode_plus`` for list/tuple input or ``encode_plus`` for a
        single example, forwarding every keyword unchanged.
        Raises ValueError when text or text_pair is neither a str nor a
        list/tuple of str.
        """
        # Accept either one string or a (possibly empty) list/tuple of strings.
        is_valid_single_text = isinstance(text, str)
        is_valid_batch_text = isinstance(text, (list, tuple)) and (
            len(text) == 0 or (isinstance(text[0], str))
        )
        if not (is_valid_single_text or is_valid_batch_text):
            raise ValueError(
                "text input must be of type `str` (single example) or `List[str]` (batch)."
            )
        is_valid_single_text_pair = isinstance(text_pair, str)
        is_valid_batch_text_pair = isinstance(text_pair, (list, tuple)) and (
            len(text_pair) == 0 or isinstance(text_pair[0], str)
        )
        if not (text_pair is None or is_valid_single_text_pair or is_valid_batch_text_pair):
            raise ValueError(
                "text_pair input must be of type `str` (single example) or `List[str]` (batch)."
            )
        is_batched = bool(isinstance(text, (list, tuple)))
        if is_batched:
            # Pair up texts (and entities / spans) element-wise when pairs
            # are provided, else pass the lists through as-is.
            batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
            if entities is None:
                batch_entities_or_entities_pairs = None
            else:
                batch_entities_or_entities_pairs = (
                    list(zip(entities, entities_pair)) if entities_pair is not None else entities
                )
            if entity_spans is None:
                batch_entity_spans_or_entity_spans_pairs = None
            else:
                batch_entity_spans_or_entity_spans_pairs = (
                    list(zip(entity_spans, entity_spans_pair))
                    if entity_spans_pair is not None
                    else entity_spans
                )
            return self.batch_encode_plus(
                batch_text_or_text_pairs=batch_text_or_text_pairs,
                batch_entity_spans_or_entity_spans_pairs=batch_entity_spans_or_entity_spans_pairs,
                batch_entities_or_entities_pairs=batch_entities_or_entities_pairs,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                max_entity_length=max_entity_length,
                stride=stride,
                is_split_into_words=is_split_into_words,
                pad_to_multiple_of=pad_to_multiple_of,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kw,
            )
        else:
            return self.encode_plus(
                text=text,
                text_pair=text_pair,
                entity_spans=entity_spans,
                entity_spans_pair=entity_spans_pair,
                entities=entities,
                entities_pair=entities_pair,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                max_entity_length=max_entity_length,
                stride=stride,
                is_split_into_words=is_split_into_words,
                pad_to_multiple_of=pad_to_multiple_of,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kw,
            )
    def _encode_plus(
        self,
        text,
        text_pair=None,
        entity_spans=None,
        entity_spans_pair=None,
        entities=None,
        entities_pair=None,
        add_special_tokens=True,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        truncation_strategy=TruncationStrategy.DO_NOT_TRUNCATE,
        max_length=None,
        max_entity_length=None,
        stride=0,
        is_split_into_words=False,
        pad_to_multiple_of=None,
        return_tensors=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        **kw,
    ):
        """Encode a single example (with optional entities) into model inputs.

        Builds the token-id sequences and entity token spans via
        ``_create_input_sequence``, then delegates padding/truncation and
        tensor conversion to ``prepare_for_model``.
        Raises NotImplementedError for offset mapping or pre-split words,
        which this (slow) tokenizer does not support.
        """
        if return_offsets_mapping:
            raise NotImplementedError()
        if is_split_into_words:
            raise NotImplementedError()
        (
            first_ids,
            second_ids,
            first_entity_ids,
            second_entity_ids,
            first_entity_token_spans,
            second_entity_token_spans,
        ) = self._create_input_sequence(
            text=text,
            text_pair=text_pair,
            entities=entities,
            entities_pair=entities_pair,
            entity_spans=entity_spans,
            entity_spans_pair=entity_spans_pair,
            **kw,
        )
        # prepend_batch_axis=True: single example is returned as a batch of 1.
        return self.prepare_for_model(
            first_ids,
            pair_ids=second_ids,
            entity_ids=first_entity_ids,
            pair_entity_ids=second_entity_ids,
            entity_token_spans=first_entity_token_spans,
            pair_entity_token_spans=second_entity_token_spans,
            add_special_tokens=add_special_tokens,
            padding=padding_strategy.value,
            truncation=truncation_strategy.value,
            max_length=max_length,
            max_entity_length=max_entity_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors=return_tensors,
            prepend_batch_axis=True,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_length=return_length,
            verbose=verbose,
        )
    def _batch_encode_plus(
        self,
        batch_text_or_text_pairs,
        batch_entity_spans_or_entity_spans_pairs=None,
        batch_entities_or_entities_pairs=None,
        add_special_tokens=True,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        truncation_strategy=TruncationStrategy.DO_NOT_TRUNCATE,
        max_length=None,
        max_entity_length=None,
        stride=0,
        is_split_into_words=False,
        pad_to_multiple_of=None,
        return_tensors=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        **kw,
    ):
        """Encode a batch of examples (with optional entities) into model inputs.

        Each batch element may be a single text or a (text, text_pair) tuple;
        entities and entity spans follow the same single-or-pair convention.
        Builds per-example id sequences via ``_create_input_sequence`` and
        delegates padding/truncation to ``_batch_prepare_for_model``.
        Raises NotImplementedError for offset mapping or pre-split words.
        """
        if return_offsets_mapping:
            raise NotImplementedError()
        if is_split_into_words:
            raise NotImplementedError()
        input_ids = []
        entity_ids = []
        entity_token_spans = []
        for index, text_or_text_pair in enumerate(batch_text_or_text_pairs):
            # A tuple/list element is a (text, text_pair); anything else is a
            # single text with no pair.
            if not isinstance(text_or_text_pair, (list, tuple)):
                text, text_pair = text_or_text_pair, None
            else:
                text, text_pair = text_or_text_pair
            entities, entities_pair = None, None
            if batch_entities_or_entities_pairs is not None:
                entities_or_entities_pairs = batch_entities_or_entities_pairs[index]
                if entities_or_entities_pairs:
                    # A flat list of strings is entities for the first text;
                    # otherwise it is a (entities, entities_pair) pair.
                    if isinstance(entities_or_entities_pairs[0], str):
                        entities, entities_pair = entities_or_entities_pairs, None
                    else:
                        entities, entities_pair = entities_or_entities_pairs
            entity_spans, entity_spans_pair = None, None
            if batch_entity_spans_or_entity_spans_pairs is not None:
                entity_spans_or_entity_spans_pairs = batch_entity_spans_or_entity_spans_pairs[index]
                # A list whose first element is itself a list means
                # (spans, spans_pair); a list of tuples means spans only.
                if len(entity_spans_or_entity_spans_pairs) > 0 and isinstance(
                    entity_spans_or_entity_spans_pairs[0], list
                ):
                    entity_spans, entity_spans_pair = entity_spans_or_entity_spans_pairs
                else:
                    entity_spans, entity_spans_pair = entity_spans_or_entity_spans_pairs, None
            (
                first_ids,
                second_ids,
                first_entity_ids,
                second_entity_ids,
                first_entity_token_spans,
                second_entity_token_spans,
            ) = self._create_input_sequence(
                text=text,
                text_pair=text_pair,
                entities=entities,
                entities_pair=entities_pair,
                entity_spans=entity_spans,
                entity_spans_pair=entity_spans_pair,
                **kw,
            )
            input_ids.append((first_ids, second_ids))
            entity_ids.append((first_entity_ids, second_entity_ids))
            entity_token_spans.append((first_entity_token_spans, second_entity_token_spans))
        batch_outputs = self._batch_prepare_for_model(
            input_ids,
            batch_entity_ids_pairs=entity_ids,
            batch_entity_token_spans_pairs=entity_token_spans,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            max_entity_length=max_entity_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_length=return_length,
            return_tensors=return_tensors,
            verbose=verbose,
        )
        return BatchEncoding(batch_outputs)
def _check_entity_input_format(self, entities, entity_spans):
if not isinstance(entity_spans, list):
raise ValueError("entity_spans should be given as a list")
elif len(entity_spans) > 0 and not isinstance(entity_spans[0], tuple):
raise ValueError(
"entity_spans should be given as a list of tuples "
"containing the start and end character indices"
)
if entities is not None:
if not isinstance(entities, list):
raise ValueError("If you specify entities, they should be given as a list")
if len(entities) > 0 and not isinstance(entities[0], str):
raise ValueError(
"If you specify entities, they should be given as a list of entity names"
)
if len(entities) != len(entity_spans):
raise ValueError(
"If you specify entities, entities and entity_spans must be the same length"
)
    def _create_input_sequence(
        self,
        text,
        text_pair=None,
        entities=None,
        entities_pair=None,
        entity_spans=None,
        entity_spans_pair=None,
        **kw,
    ):
        """Tokenize `text` (and optionally `text_pair`) while converting
        character-level `entity_spans` into token-level spans and resolving
        entity names to ids.

        Behavior branches on `self.task`:
          - None: generic encoding; spans without names map to the
            entity-mask id, named entities are looked up in `entity_vocab`.
          - "entity_classification": exactly one span, which gets the first
            additional special token inserted on both sides.
          - "entity_pair_classification": exactly two spans (head, tail);
            each gets its own additional special token inserted on both sides.
          - "entity_span_classification": any number of spans, all mapped to
            the entity-mask id.

        Returns a 6-tuple: (first_ids, second_ids, first_entity_ids,
        second_entity_ids, first_entity_token_spans,
        second_entity_token_spans); unused slots are None.
        """
        def get_input_ids(text):
            # Plain tokenization: text -> list of token ids.
            tokens = self.tokenize(text, **kw)
            return self.convert_tokens_to_ids(tokens)
        def get_input_ids_and_entity_token_spans(text, entity_spans):
            # Tokenize `text` piecewise, cutting at every span boundary, so
            # each boundary character position maps to an exact token index.
            if entity_spans is None:
                return get_input_ids(text), None
            cur = 0
            input_ids = []
            entity_token_spans = [None] * len(entity_spans)
            # All distinct boundary character positions, ascending.
            split_char_positions = sorted(frozenset(itertools.chain(*entity_spans)))
            char_pos2token_pos = {}
            for split_char_position in split_char_positions:
                orig_split_char_position = split_char_position
                # Cut before a preceding space so the space is tokenized with
                # the following chunk — presumably to match space-prefixed
                # subword tokenizers; TODO confirm against the tokenizer used.
                if split_char_position > 0 and text[split_char_position - 1] == " ":
                    split_char_position -= 1
                if cur != split_char_position:
                    input_ids += get_input_ids(text[cur:split_char_position])
                    cur = split_char_position
                # Token index corresponding to this character boundary.
                char_pos2token_pos[orig_split_char_position] = len(input_ids)
            input_ids += get_input_ids(text[cur:])
            entity_token_spans = [
                (char_pos2token_pos[char_start], char_pos2token_pos[char_end])
                for char_start, char_end in entity_spans
            ]
            return input_ids, entity_token_spans
        first_ids, second_ids = None, None
        first_entity_ids, second_entity_ids = None, None
        first_entity_token_spans, second_entity_token_spans = None, None
        if self.task is None:
            if entity_spans is None:
                first_ids = get_input_ids(text)
            else:
                self._check_entity_input_format(entities, entity_spans)
                first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(
                    text, entity_spans
                )
                if entities is None:
                    # No names supplied: every span becomes the mask entity.
                    first_entity_ids = [self.entity_mask_token_id] * len(entity_spans)
                else:
                    # Unknown entity names fall back to the UNK entity id.
                    first_entity_ids = [
                        self.entity_vocab.get(entity, self.entity_unk_token_id)
                        for entity in entities
                    ]
            if text_pair is not None:
                if entity_spans_pair is None:
                    second_ids = get_input_ids(text_pair)
                else:
                    self._check_entity_input_format(entities_pair, entity_spans_pair)
                    second_ids, second_entity_token_spans = get_input_ids_and_entity_token_spans(
                        text_pair, entity_spans_pair
                    )
                    if entities_pair is None:
                        second_entity_ids = [self.entity_mask_token_id] * len(entity_spans_pair)
                    else:
                        second_entity_ids = [
                            self.entity_vocab.get(entity, self.entity_unk_token_id)
                            for entity in entities_pair
                        ]
        elif self.task == "entity_classification":
            if not (
                isinstance(entity_spans, list)
                and len(entity_spans) == 1
                and isinstance(entity_spans[0], tuple)
            ):
                raise ValueError(
                    "Entity spans should be a list containing a single tuple "
                    "containing the start and end character indices of an entity"
                )
            first_entity_ids = [self.entity_mask_token_id]
            first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(
                text, entity_spans
            )
            # Wrap the single entity with the first additional special token:
            # insert at the END first so the start index is still valid.
            entity_token_start, entity_token_end = first_entity_token_spans[0]
            first_ids = (
                first_ids[:entity_token_end]
                + [self.additional_special_tokens_ids[0]]
                + first_ids[entity_token_end:]
            )
            first_ids = (
                first_ids[:entity_token_start]
                + [self.additional_special_tokens_ids[0]]
                + first_ids[entity_token_start:]
            )
            # Span widens by the two inserted tokens.
            first_entity_token_spans = [(entity_token_start, entity_token_end + 2)]
        elif self.task == "entity_pair_classification":
            if not (
                isinstance(entity_spans, list)
                and len(entity_spans) == 2
                and isinstance(entity_spans[0], tuple)
                and isinstance(entity_spans[1], tuple)
            ):
                raise ValueError(
                    "Entity spans should be provided as a list of two tuples, "
                    "each tuple containing the start and end character indices of an entity"
                )
            head_span, tail_span = entity_spans
            first_entity_ids = [self.entity_mask_token_id, self.entity_mask2_token_id]
            first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(
                text, entity_spans
            )
            head_token_span, tail_token_span = first_entity_token_spans
            token_span_with_special_token_ids = [
                (head_token_span, self.additional_special_tokens_ids[0]),
                (tail_token_span, self.additional_special_tokens_ids[1]),
            ]
            # Adjust spans for the 2 special tokens inserted around each
            # entity: the later span also shifts by the 2 tokens inserted
            # around the earlier one. Process the later span first so earlier
            # insertions don't invalidate its indices.
            if head_token_span[0] < tail_token_span[0]:
                first_entity_token_spans[0] = (head_token_span[0], head_token_span[1] + 2)
                first_entity_token_spans[1] = (tail_token_span[0] + 2, tail_token_span[1] + 4)
                token_span_with_special_token_ids = reversed(token_span_with_special_token_ids)
            else:
                first_entity_token_spans[0] = (head_token_span[0] + 2, head_token_span[1] + 4)
                first_entity_token_spans[1] = (tail_token_span[0], tail_token_span[1] + 2)
            for (
                entity_token_start,
                entity_token_end,
            ), special_token_id in token_span_with_special_token_ids:
                # Insert at the end first, then at the start (same trick as above).
                first_ids = (
                    first_ids[:entity_token_end] + [special_token_id] + first_ids[entity_token_end:]
                )
                first_ids = (
                    first_ids[:entity_token_start]
                    + [special_token_id]
                    + first_ids[entity_token_start:]
                )
        elif self.task == "entity_span_classification":
            if not (
                isinstance(entity_spans, list)
                and len(entity_spans) > 0
                and isinstance(entity_spans[0], tuple)
            ):
                raise ValueError(
                    "Entity spans should be provided as a list of tuples, "
                    "each tuple containing the start and end character indices of an entity"
                )
            first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(
                text, entity_spans
            )
            first_entity_ids = [self.entity_mask_token_id] * len(entity_spans)
        else:
            raise ValueError(f"Task {self.task} not supported")
        return (
            first_ids,
            second_ids,
            first_entity_ids,
            second_entity_ids,
            first_entity_token_spans,
            second_entity_token_spans,
        )
def _batch_prepare_for_model(
self,
batch_ids_pairs,
batch_entity_ids_pairs,
batch_entity_token_spans_pairs,
add_special_tokens=True,
padding_strategy=PaddingStrategy.DO_NOT_PAD,
truncation_strategy=TruncationStrategy.DO_NOT_TRUNCATE,
max_length=None,
max_entity_length=None,
stride=0,
pad_to_multiple_of=None,
return_tensors=None,
return_token_type_ids=None,
return_attention_mask=None,
return_overflowing_tokens=False,
return_special_tokens_mask=False,
return_length=False,
verbose=True,
):
batch_outputs = {}
for input_ids, entity_ids, entity_token_span_pairs in zip(
batch_ids_pairs, batch_entity_ids_pairs, batch_entity_token_spans_pairs
):
first_ids, second_ids = input_ids
first_entity_ids, second_entity_ids = entity_ids
first_entity_token_spans, second_entity_token_spans = entity_token_span_pairs
outputs = self.prepare_for_model(
first_ids,
second_ids,
entity_ids=first_entity_ids,
pair_entity_ids=second_entity_ids,
entity_token_spans=first_entity_token_spans,
pair_entity_token_spans=second_entity_token_spans,
add_special_tokens=add_special_tokens,
padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
truncation=truncation_strategy.value,
max_length=max_length,
max_entity_length=max_entity_length,
stride=stride,
pad_to_multiple_of=None, # we pad in batch afterward
return_attention_mask=False, # we pad in batch afterward
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
return_tensors=None, # We convert the whole batch to tensors at the end
prepend_batch_axis=False,
verbose=verbose,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
batch_outputs = self.pad(
batch_outputs,
padding=padding_strategy.value,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
return batch_outputs
    def prepare_for_model(
        self,
        ids,
        pair_ids=None,
        entity_ids=None,
        pair_entity_ids=None,
        entity_token_spans=None,
        pair_entity_token_spans=None,
        add_special_tokens=True,
        padding=False,
        truncation=False,
        max_length=None,
        max_entity_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_tensors=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        prepend_batch_axis=False,
        **kw,
    ):
        """Assemble one encoded example from pre-tokenized ids plus optional
        entity ids/spans: truncate, add special tokens, build entity position
        ids, then optionally pad and convert to tensors.

        `ids`/`pair_ids` are token-id sequences; `entity_token_spans` /
        `pair_entity_token_spans` are (start, end) token indices into them.
        Entities whose span end exceeds the (possibly truncated) sequence are
        dropped with a warning. Returns a BatchEncoding.

        Raises ValueError for incompatible flag combinations (token_type_ids
        without special tokens; overflow with longest_first on pairs).
        """
        (
            padding_strategy,
            truncation_strategy,
            max_length,
            kw,
        ) = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kw,
        )
        # Compute lengths
        pair = bool(pair_ids is not None)
        len_ids = len(ids)
        len_pair_ids = len(pair_ids) if pair else 0
        if return_token_type_ids and not add_special_tokens:
            raise ValueError(
                "Asking to return token_type_ids while setting add_special_tokens to False "
                "results in an undefined behavior. Please set add_special_tokens to True or "
                "set return_token_type_ids to None."
            )
        if (
            return_overflowing_tokens
            and truncation_strategy == TruncationStrategy.LONGEST_FIRST
            and pair_ids is not None
        ):
            raise ValueError(
                "Not possible to return overflowing tokens for pair of sequences with the "
                "`longest_first`. Please select another truncation strategy than `longest_first`, "
                "for instance `only_second` or `only_first`."
            )
        # Defaults follow what the model expects as inputs.
        if return_token_type_ids is None:
            return_token_type_ids = "token_type_ids" in self.model_input_names
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        encoded_inputs = {}
        # Total length including the special tokens that will be added.
        total_len = (
            len_ids
            + len_pair_ids
            + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
        )
        # Truncation: Handle max sequence length and max_entity_length
        overflowing_tokens = []
        if (
            truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE
            and max_length
            and total_len > max_length
        ):
            ids, pair_ids, overflowing_tokens = self.truncate_sequences(
                ids,
                pair_ids=pair_ids,
                num_tokens_to_remove=total_len - max_length,
                truncation_strategy=truncation_strategy,
                stride=stride,
            )
        if return_overflowing_tokens:
            encoded_inputs["overflowing_tokens"] = overflowing_tokens
            encoded_inputs["num_truncated_tokens"] = total_len - max_length
        if add_special_tokens:
            sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
            token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
            # Offsets shift entity token spans past the inserted special tokens.
            entity_token_offset = 1  # 1 * <s> token
            pair_entity_token_offset = len(ids) + 3  # 1 * <s> token & 2 * <sep> tokens
        else:
            sequence = ids + pair_ids if pair else ids
            token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
            entity_token_offset = 0
            pair_entity_token_offset = len(ids)
        encoded_inputs["input_ids"] = sequence
        if return_token_type_ids:
            encoded_inputs["token_type_ids"] = token_type_ids
        if return_special_tokens_mask:
            if add_special_tokens:
                encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
            else:
                encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
        if not max_entity_length:
            max_entity_length = self.max_entity_length
        if entity_ids is not None:
            total_entity_len = 0
            num_invalid_entities = 0
            # Keep only entities whose token span still fits after truncation.
            valid_entity_ids = [
                ent_id
                for ent_id, span in zip(entity_ids, entity_token_spans)
                if span[1] <= len(ids)
            ]
            valid_entity_token_spans = [span for span in entity_token_spans if span[1] <= len(ids)]
            total_entity_len += len(valid_entity_ids)
            num_invalid_entities += len(entity_ids) - len(valid_entity_ids)
            valid_pair_entity_ids, valid_pair_entity_token_spans = None, None
            if pair_entity_ids is not None:
                valid_pair_entity_ids = [
                    ent_id
                    for ent_id, span in zip(pair_entity_ids, pair_entity_token_spans)
                    if span[1] <= len(pair_ids)
                ]
                valid_pair_entity_token_spans = [
                    span for span in pair_entity_token_spans if span[1] <= len(pair_ids)
                ]
                total_entity_len += len(valid_pair_entity_ids)
                num_invalid_entities += len(pair_entity_ids) - len(valid_pair_entity_ids)
            if num_invalid_entities != 0:
                logger.warning(
                    f"{num_invalid_entities} entities are ignored because their entity spans are invalid due to the truncation of input tokens"
                )
            if (
                truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE
                and total_entity_len > max_entity_length
            ):
                # Entity lists are truncated with the same machinery as token
                # sequences; spans are cut to match the surviving ids.
                (
                    valid_entity_ids,
                    valid_pair_entity_ids,
                    overflowing_entities,
                ) = self.truncate_sequences(
                    valid_entity_ids,
                    pair_ids=valid_pair_entity_ids,
                    num_tokens_to_remove=total_entity_len - max_entity_length,
                    truncation_strategy=truncation_strategy,
                    stride=stride,
                )
                valid_entity_token_spans = valid_entity_token_spans[: len(valid_entity_ids)]
                if valid_pair_entity_token_spans is not None:
                    valid_pair_entity_token_spans = valid_pair_entity_token_spans[
                        : len(valid_pair_entity_ids)
                    ]
                if return_overflowing_tokens:
                    encoded_inputs["overflowing_entities"] = overflowing_entities
                    encoded_inputs["num_truncated_entities"] = total_entity_len - max_entity_length
            final_entity_ids = (
                valid_entity_ids + valid_pair_entity_ids
                if valid_pair_entity_ids
                else valid_entity_ids
            )
            encoded_inputs["entity_ids"] = list(final_entity_ids)
            entity_position_ids = []
            entity_start_positions = []
            entity_end_positions = []
            for (token_spans, offset) in (
                (valid_entity_token_spans, entity_token_offset),
                (valid_pair_entity_token_spans, pair_entity_token_offset),
            ):
                if token_spans is not None:
                    for start, end in token_spans:
                        # Shift into the full (special-token-including) sequence.
                        start += offset
                        end += offset
                        # Fixed-width position list, -1 padded; spans longer
                        # than max_mention_length are clipped.
                        position_ids = list(range(start, end))[: self.max_mention_length]
                        position_ids += [-1] * (self.max_mention_length - end + start)
                        entity_position_ids.append(position_ids)
                        entity_start_positions.append(start)
                        entity_end_positions.append(end - 1)
            encoded_inputs["entity_position_ids"] = entity_position_ids
            if self.task == "entity_span_classification":
                encoded_inputs["entity_start_positions"] = entity_start_positions
                encoded_inputs["entity_end_positions"] = entity_end_positions
            if return_token_type_ids:
                encoded_inputs["entity_token_type_ids"] = [0] * len(encoded_inputs["entity_ids"])
        self._eventual_warn_about_too_long_sequence(
            encoded_inputs["input_ids"], max_length, verbose
        )
        if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
            encoded_inputs = self.pad(
                encoded_inputs,
                max_length=max_length,
                max_entity_length=max_entity_length,
                padding=padding_strategy.value,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )
        if return_length:
            encoded_inputs["length"] = len(encoded_inputs["input_ids"])
        batch_outputs = BatchEncoding(
            encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
        )
        return batch_outputs
def pad(
self,
encoded_inputs,
padding=True,
max_length=None,
max_entity_length=None,
pad_to_multiple_of=None,
return_attention_mask=None,
return_tensors=None,
verbose=True,
):
if isinstance(encoded_inputs, (list, tuple)) and isinstance(
encoded_inputs[0], (dict, BatchEncoding)
):
encoded_inputs = {
key: [example[key] for example in encoded_inputs]
for key in encoded_inputs[0].keys()
}
if self.model_input_names[0] not in encoded_inputs:
raise ValueError(
"You should supply an encoding or a list of encodings to this method "
f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}"
)
required_input = encoded_inputs[self.model_input_names[0]]
if not required_input:
if return_attention_mask:
encoded_inputs["attention_mask"] = []
return encoded_inputs
first_element = required_input[0]
if isinstance(first_element, (list, tuple)):
index = 0
while len(required_input[index]) == 0:
index += 1
if index < len(required_input):
first_element = required_input[index][0]
if not isinstance(first_element, (int, list, tuple)):
if is_tf_available() and _is_tensorflow(first_element):
return_tensors = "tf" if return_tensors is None else return_tensors
elif is_torch_available() and _is_torch(first_element):
return_tensors = "pt" if return_tensors is None else return_tensors
elif isinstance(first_element, np.ndarray):
return_tensors = "np" if return_tensors is None else return_tensors
else:
raise ValueError(
f"type of {first_element} unknown: {type(first_element)}. "
f"Should be one of a python, numpy, pytorch or tensorflow object."
)
for key, value in encoded_inputs.items():
encoded_inputs[key] = to_py_obj(value)
padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(
padding=padding, max_length=max_length, verbose=verbose
)
if max_entity_length is None:
max_entity_length = self.max_entity_length
required_input = encoded_inputs[self.model_input_names[0]]
if required_input and not isinstance(required_input[0], (list, tuple)):
encoded_inputs = self._pad(
encoded_inputs,
max_length=max_length,
max_entity_length=max_entity_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
return BatchEncoding(encoded_inputs, tensor_type=return_tensors)
batch_size = len(required_input)
if any(len(v) != batch_size for v in encoded_inputs.values()):
raise ValueError(
"Some items in the output dictionary have a different batch size than others."
)
if padding_strategy == PaddingStrategy.LONGEST:
max_length = max(len(inputs) for inputs in required_input)
max_entity_length = (
max(len(inputs) for inputs in encoded_inputs["entity_ids"])
if "entity_ids" in encoded_inputs
else 0
)
padding_strategy = PaddingStrategy.MAX_LENGTH
batch_outputs = {}
for i in range(batch_size):
inputs = dict((k, v[i]) for k, v in encoded_inputs.items())
outputs = self._pad(
inputs,
max_length=max_length,
max_entity_length=max_entity_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
return BatchEncoding(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        encoded_inputs,
        max_length=None,
        max_entity_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        """Pad a SINGLE encoding dict in place to `max_length` /
        `max_entity_length` on `self.padding_side`.

        Token fields are padded with `self.PAD` (mask 0, token_type 0,
        special_tokens_mask 1); entity fields with `self.entity_pad_token_id`
        and all-(-1) position rows. Returns the mutated dict.
        """
        entities_provided = bool("entity_ids" in encoded_inputs)
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if padding_strategy == PaddingStrategy.LONGEST:
            # For a single example, LONGEST degenerates to its own length.
            max_length = len(encoded_inputs["input_ids"])
            if entities_provided:
                max_entity_length = len(encoded_inputs["entity_ids"])
        # Round targets up to the next multiple when requested.
        if (
            max_length is not None
            and pad_to_multiple_of is not None
            and (max_length % pad_to_multiple_of != 0)
        ):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        if (
            entities_provided
            and max_entity_length is not None
            and pad_to_multiple_of is not None
            and (max_entity_length % pad_to_multiple_of != 0)
        ):
            max_entity_length = ((max_entity_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and (
            len(encoded_inputs["input_ids"]) != max_length
            or (entities_provided and len(encoded_inputs["entity_ids"]) != max_entity_length)
        )
        # Build all-ones masks BEFORE padding so their lengths match the
        # unpadded sequences.
        if return_attention_mask and "attention_mask" not in encoded_inputs:
            encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"])
        if (
            entities_provided
            and return_attention_mask
            and "entity_attention_mask" not in encoded_inputs
        ):
            encoded_inputs["entity_attention_mask"] = [1] * len(encoded_inputs["entity_ids"])
        if needs_to_be_padded:
            difference = max_length - len(encoded_inputs["input_ids"])
            if entities_provided:
                entity_difference = max_entity_length - len(encoded_inputs["entity_ids"])
            if self.padding_side == "right":
                # Append padding after the real content.
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = (
                        encoded_inputs["attention_mask"] + [0] * difference
                    )
                    if entities_provided:
                        encoded_inputs["entity_attention_mask"] = (
                            encoded_inputs["entity_attention_mask"] + [0] * entity_difference
                        )
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = (
                        encoded_inputs["token_type_ids"] + [0] * difference
                    )
                    if entities_provided:
                        encoded_inputs["entity_token_type_ids"] = (
                            encoded_inputs["entity_token_type_ids"] + [0] * entity_difference
                        )
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = (
                        encoded_inputs["special_tokens_mask"] + [1] * difference
                    )
                encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.PAD] * difference
                if entities_provided:
                    encoded_inputs["entity_ids"] = (
                        encoded_inputs["entity_ids"]
                        + [self.entity_pad_token_id] * entity_difference
                    )
                    # Padded entities get a full row of -1 position ids.
                    encoded_inputs["entity_position_ids"] = (
                        encoded_inputs["entity_position_ids"]
                        + [[-1] * self.max_mention_length] * entity_difference
                    )
                    if self.task == "entity_span_classification":
                        encoded_inputs["entity_start_positions"] = (
                            encoded_inputs["entity_start_positions"] + [0] * entity_difference
                        )
                        encoded_inputs["entity_end_positions"] = (
                            encoded_inputs["entity_end_positions"] + [0] * entity_difference
                        )
            elif self.padding_side == "left":
                # Prepend padding before the real content (mirror of above).
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs[
                        "attention_mask"
                    ]
                    if entities_provided:
                        encoded_inputs["entity_attention_mask"] = [
                            0
                        ] * entity_difference + encoded_inputs["entity_attention_mask"]
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = [0] * difference + encoded_inputs[
                        "token_type_ids"
                    ]
                    if entities_provided:
                        encoded_inputs["entity_token_type_ids"] = [
                            0
                        ] * entity_difference + encoded_inputs["entity_token_type_ids"]
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs[
                        "special_tokens_mask"
                    ]
                encoded_inputs["input_ids"] = [self.PAD] * difference + encoded_inputs["input_ids"]
                if entities_provided:
                    encoded_inputs["entity_ids"] = [
                        self.entity_pad_token_id
                    ] * entity_difference + encoded_inputs["entity_ids"]
                    encoded_inputs["entity_position_ids"] = [
                        [-1] * self.max_mention_length
                    ] * entity_difference + encoded_inputs["entity_position_ids"]
                    if self.task == "entity_span_classification":
                        encoded_inputs["entity_start_positions"] = [
                            0
                        ] * entity_difference + encoded_inputs["entity_start_positions"]
                        encoded_inputs["entity_end_positions"] = [
                            0
                        ] * entity_difference + encoded_inputs["entity_end_positions"]
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
def save_vocabulary(self, dir, pre=None):
vocab_file, merge_file = super().save_vocabulary(dir, pre)
entity_vocab_file = os.path.join(
dir,
(pre + "-" if pre else "") + VOCAB_FS["entity_vocab_file"],
)
with open(entity_vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.entity_vocab, ensure_ascii=False))
return vocab_file, merge_file, entity_vocab_file
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,503
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/metric/bert.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import bert_score
import datasets as ds
import functools
from contextlib import contextmanager
from packaging import version
@contextmanager
def filter_logging_context():
    """Temporarily silence the benign "This IS expected if you are
    initializing" warning emitted while a model is being loaded.

    A record filter is attached to the ``transformers.modeling_utils``
    logger on entry and removed on exit, even if the body raises.
    """

    def filter_log(record):
        # Drop only the known benign warning; let everything else through.
        return "This IS expected if you are initializing" not in record.msg

    logger = ds.utils.logging.get_logger("transformers.modeling_utils")
    logger.addFilter(filter_log)
    try:
        yield
    finally:
        logger.removeFilter(filter_log)
class BERTScore(ds.Metric):
def _info(self):
return ds.MetricInfo(
description="",
citation="",
inputs_description="",
features=ds.Features(
{
"predictions": ds.Value("string", id="sequence"),
"references": ds.Sequence(ds.Value("string", id="sequence"), id="references"),
}
),
codebase_urls=[],
reference_urls=[],
)
def _compute(
self,
predictions,
references,
lang=None,
model_type=None,
n_lays=None,
verbose=False,
idf=False,
device=None,
batch_size=64,
nthreads=4,
all_layers=False,
rescale_with_baseline=False,
baseline_path=None,
use_fast_tokenizer=False,
):
get_hash = bert_score.utils.get_hash
scorer = bert_score.BERTScorer
if version.parse(bert_score.__version__) >= version.parse("0.3.10"):
get_hash = functools.partial(get_hash, use_fast_tokenizer=use_fast_tokenizer)
scorer = functools.partial(scorer, use_fast_tokenizer=use_fast_tokenizer)
elif use_fast_tokenizer:
raise ImportWarning(
"To use a fast tokenizer, the module `bert-score>=0.3.10` is required, and the current version of `bert-score` doesn't match this condition.\n"
'You can install it with `pip install "bert-score>=0.3.10"`.'
)
if model_type is None:
assert lang is not None, "either lang or model_type should be specified"
model_type = bert_score.utils.lang2model[lang.lower()]
if n_lays is None:
n_lays = bert_score.utils.model2layers[model_type]
hashcode = get_hash(
model=model_type,
n_lays=n_lays,
idf=idf,
rescale_with_baseline=rescale_with_baseline,
use_custom_baseline=baseline_path is not None,
)
with filter_logging_context():
if not hasattr(self, "cached_bertscorer") or self.cached_bertscorer.hash != hashcode:
self.cached_bertscorer = scorer(
model_type=model_type,
n_lays=n_lays,
batch_size=batch_size,
nthreads=nthreads,
all_layers=all_layers,
idf=idf,
device=device,
lang=lang,
rescale_with_baseline=rescale_with_baseline,
baseline_path=baseline_path,
)
(P, R, F) = self.cached_bertscorer.score(
cands=predictions,
refs=references,
verbose=verbose,
batch_size=batch_size,
)
return {
"precision": P.tolist(),
"recall": R.tolist(),
"f1": F.tolist(),
"hashcode": hashcode,
}
def add_batch(self, preds=None, refs=None, **kw):
if refs is not None:
refs = [[r] if isinstance(r, str) else r for r in refs]
super().add_batch(preds, refs, **kw)
def add(self, pred=None, ref=None, **kw):
    """Queue a single prediction/reference pair for later scoring.

    A bare-string reference is wrapped in a singleton list before being
    handed to the base class.
    """
    ref = [ref] if isinstance(ref, str) else ref
    super().add(pred, ref, **kw)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,504
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/test/unit/language/test_block_pointer.py
|
import pytest
import torch
import triton
import triton.language as tl
@triton.jit
def block_copy_kernel(a_ptr, b_ptr, N, BLOCK_SIZE: tl.constexpr, padding_option: tl.constexpr):
    """Copy one BLOCK_SIZE tile of `a` into `b` per program instance.

    `a`'s block pointer advertises shape N // 2, so tiles past the halfway
    point read out of bounds and are filled according to `padding_option`
    ("zero" or "nan").
    """
    pid = tl.program_id(0)
    # We only copy half of the data to see if the padding works
    a_block_ptr = tl.make_block_ptr(base=a_ptr, shape=(N // 2, ), strides=(1, ), offsets=(pid * BLOCK_SIZE, ),
                                    block_shape=(BLOCK_SIZE, ), order=(0, ))
    # Destination view spans the full N elements, so every store is in range.
    b_block_ptr = tl.make_block_ptr(base=b_ptr, shape=(N, ), strides=(1, ), offsets=(pid * BLOCK_SIZE, ),
                                    block_shape=(BLOCK_SIZE, ), order=(0, ))
    # boundary_check on dim 0 masks out-of-range loads; masked elements are
    # replaced per `padding_option`.
    a = tl.load(a_block_ptr, boundary_check=(0, ), padding_option=padding_option)
    tl.store(b_block_ptr, a, boundary_check=(0, ))
@pytest.mark.parametrize("dtype_str, n, padding_option",
[(dtype_str, n, padding) for dtype_str in ("bool", "int16", "float16")
for n in (64, 128, 256, 512, 1024)
for padding in ("zero", "nan")])
def test_block_copy(dtype_str, n, padding_option):
capability = torch.cuda.get_device_capability()
if capability[0] >= 9:
pytest.skip("Hopper support is working in progress")
dtype = getattr(torch, dtype_str)
if dtype_str in ("bool", "int16"):
if padding_option == "nan":
pytest.skip("Padding with NaN is not supported for integer types")
a = torch.randint(0, 2, (n, ), device="cuda", dtype=dtype)
else:
a = torch.randn((n, ), device="cuda", dtype=dtype)
b = torch.zeros((n, ), device="cuda", dtype=dtype)
grid = lambda meta: (triton.cdiv(n, meta["BLOCK_SIZE"]),)
block_copy_kernel[grid](a_ptr=a, b_ptr=b, N=n, BLOCK_SIZE=64, padding_option=padding_option)
assert torch.all(a[0: n // 2] == b[0: n // 2])
if padding_option == "zero":
assert torch.all(b[n // 2: n] == 0)
else:
assert torch.all(torch.isnan(b[n // 2: n]))
@triton.jit
def matmul_no_scf_with_advance_kernel(
    a_ptr, b_ptr, c_ptr,
    M, N, K,
    stride_am, stride_ak,
    stride_bk, stride_bn,
    stride_cm, stride_cn,
    BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr
):
    """Single-tile matmul C = A @ B with no accumulation loop ("no scf").

    Assumes the whole problem fits in one (BLOCK_M, BLOCK_K) x (BLOCK_K,
    BLOCK_N) tile; also exercises `tl.advance` with negative offsets.
    """
    offs_m = tl.arange(0, BLOCK_M)
    offs_n = tl.arange(0, BLOCK_N)
    a_block_ptr = tl.make_block_ptr(base=a_ptr, shape=(M, K), strides=(stride_am, stride_ak),
                                    offsets=(0, 0), block_shape=(BLOCK_M, BLOCK_K), order=(1, 0))
    b_block_ptr = tl.make_block_ptr(base=b_ptr, shape=(K, N), strides=(stride_bk, stride_bn),
                                    offsets=(0, 0), block_shape=(BLOCK_K, BLOCK_N), order=(1, 0))
    # Below two lines are just for testing negative offsets for the `advance` API, which could be removed
    # (the two advances cancel out, leaving the pointer at (0, 0)).
    a_block_ptr = tl.advance(a_block_ptr, (BLOCK_M, -BLOCK_K))
    a_block_ptr = tl.advance(a_block_ptr, (-BLOCK_M, BLOCK_K))
    # Out-of-range elements (if BLOCK_K/BLOCK_N overrun K/N) are zero-padded,
    # which leaves the dot product unchanged.
    a = tl.load(a_block_ptr, boundary_check=(1, ), padding_option="zero")
    b = tl.load(b_block_ptr, boundary_check=(0, ), padding_option="zero")
    c = tl.dot(a, b)
    # Store via plain element pointers rather than a block pointer.
    c_ptrs = c_ptr + offs_m[:, None] * stride_cm + offs_n[None, :] * stride_cn
    tl.store(c_ptrs, c)
@pytest.mark.parametrize("shape, num_warps", [
    (shape, num_warps)
    for shape in [
        [64, 64, 16],
        [64, 64, 32],
        [64, 64, 64],
    ]
    for num_warps in [4, 8]
])
def test_block_ptr_matmul_no_scf(shape, num_warps):
    """Single-tile block-pointer matmul: launch one program covering the whole
    (m, n, k) problem and compare against torch.matmul."""
    capability = torch.cuda.get_device_capability()
    if capability[0] >= 9:
        pytest.skip("Hopper support is working in progress")
    m, n, k = shape
    a = torch.randn((m, k), device="cuda", dtype=torch.float16)
    b = torch.randn((k, n), device="cuda", dtype=torch.float16)
    c = torch.empty((m, n), device="cuda", dtype=torch.float32)
    grid = lambda META: (1, )
    matmul_no_scf_with_advance_kernel[grid](a_ptr=a, b_ptr=b, c_ptr=c,
                                            M=m, N=n, K=k,
                                            stride_am=a.stride(0), stride_ak=a.stride(1),
                                            stride_bk=b.stride(0), stride_bn=b.stride(1),
                                            stride_cm=c.stride(0), stride_cn=c.stride(1),
                                            BLOCK_M=m, BLOCK_N=n, BLOCK_K=k,
                                            num_warps=num_warps)
    golden = torch.matmul(a, b)
    # torch.testing.assert_allclose is deprecated (and removed in newer
    # torch); assert_close is the replacement. check_dtype=False because the
    # kernel accumulates into fp32 `c` while `golden` stays fp16 — values are
    # promoted before comparison, matching the old assert_allclose behavior.
    torch.testing.assert_close(c, golden, check_dtype=False)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,505
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/base/doc/util/table.py
|
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
import pathlib as pth
import collections.abc as abc
from .log import Logger
from .item import Item
from .tree import Tree
from .utils import scanner
# Module-level logger for this file.
log = Logger(__name__)
# Shorthand for the digest routine that Tree.apply runs over items.
calc_dig = Item.calc_digest
class Table(abc.MutableMapping):
def __init__(self, base=None, trees=None, cols=None, **kw):
super().__init__()
self.base = base = pth.Path(base) if base else None
if not isinstance(cols, tuple):
cols = () if cols is None else (cols, )
def _cols():
cs = frozenset(cols)
ns = []
with os.scandir(base) as scan:
for e in scan:
if e.is_dir(follow_symlinks=False):
n = pth.Path(e.path).stem
if n not in cs:
ns.append(n)
ns.sort()
return (*cols, *ns)
self._cols = cols = _cols() if base is not None else cols
assert self._cols
if isinstance(trees, dict):
self._trees = trees
else:
if not isinstance(trees, tuple):
trees = () if trees is None else (trees, )
if base is not None:
def _trees():
ts = frozenset(trees)
ns = []
for c in cols:
p = base / c
if p.exists():
with os.scandir(p) as scan:
for e in scan:
if e.is_dir(follow_symlinks=False):
n = pth.Path(e.path).stem
if n not in ts:
ns.append(n)
ns.sort()
return (*trees, *ns)
trees = _trees()
self._trees = {n: Tree(n, **kw) for n in trees}
assert self._trees
def __bool__(self):
return True
__hash__ = None
def __eq__(self, other):
if isinstance(other, type(self)):
return (self._trees == other._trees and self._cols == other._cols)
return NotImplemented
def __len__(self):
return len(self._trees)
def __iter__(self):
return iter(self._trees)
def __getitem__(self, n):
return self._trees[n]
def __setitem__(self, n, tree):
self._trees[n] = tree
def __delitem__(self, n):
del self._trees[n]
def __repr__(self):
s = type(self).__name__
b = str(self.base) if self.base else None
s += "({}".format(repr(b))
s += ", {}".format(repr(self._trees))
s += ", {})".format(repr(self._cols))
return s
def stringer(self, indent=0, **kw):
for t in self._trees.values():
yield from t.stringer(indent=indent, **kw)
def walker(self, trees=None, cols=None, **_):
if trees is None:
trees = self._trees.values()
else:
trees = trees if isinstance(trees, tuple) else (trees, )
trees = [self._trees[t] for t in trees if t in self._trees]
if not trees:
log.warning('Empty trees list')
if cols is None:
cols = self._cols
else:
cols = cols if isinstance(cols, tuple) else (cols, )
cols = [c for c in cols if c in self._cols]
if not cols:
log.warning('Empty cols list')
for t in trees:
for c in cols:
yield (t, c)
def adjust_kw(self, kw):
kw['base'] = base = kw.get('base') or self.base
assert base.exists() and base.is_dir()
try:
kw['touch'] = [c for c in kw['touch'] if c in self._cols]
except KeyError:
pass
return base
async def import_rows(self, src, cols=None, **kw):
self.adjust_kw(kw)
for t, c in self.walker(**kw, cols=cols or self._cols[0]):
s = src / c if isinstance(cols, tuple) else src
s = t.appender(scanner(s, c), **kw, col=c)
await t.apply(calc_dig, **kw, src=s, col=c)
t.normalize((c, self._cols), **kw)
t.copy_items(c, **kw)
async def load_cols(self, cols=None, src=None, **kw):
base = self.adjust_kw(kw)
src = src or base
for t, c in self.walker(**kw, cols=cols):
s = src / c / t.name
s = t.appender(scanner(s, c), **kw, col=c)
await t.apply(calc_dig, **kw, src=s, col=c)
t.normalize(**kw)
t.copy_items(c, **kw)
def dump_cols(self, dst, cols=None, **kw):
self.adjust_kw(kw)
for t, c in self.walker(**kw, cols=cols):
t.copy_items(c, **kw, out=dst)
def clear_cols(self, cols=None, **kw):
self.adjust_kw(kw)
for t, c in self.walker(**kw, cols=cols):
t.clear_col(c, **kw)
async def check_items(self, **kw):
kw['path'] = self.adjust_kw(kw)
for t, c in self.walker(**kw):
if not await t.apply(calc_dig, check=True, **kw, col=c):
return False
return True
async def extract(self, col=None, **kw):
    """Extract items into ``col``, falling back to the last known column.

    The extracted stream is appended, digested, then normalized and
    copied per tree.
    """
    self.adjust_kw(kw)
    target = col if col in self._cols else self._cols[-1]
    for tree, c in self.walker(**kw, cols=target):
        feed = tree.extractor(**kw, col=c)
        feed = tree.appender(feed, **kw, col=c)
        await tree.apply(calc_dig, **kw, src=feed, col=c)
        tree.normalize(**kw)
        tree.copy_items(c, **kw)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,506
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/triton/language/standard.py
|
from __future__ import annotations
from ..runtime.jit import jit
from . import core
# -----------------------
# Standard library
# -----------------------
@jit
def cdiv(x, div):
    """
    Computes the ceiling division of :code:`x` by :code:`div`

    :param x: the input number
    :type x: Block
    :param div: the divisor
    :type div: Block
    """
    return (x + div - 1) // div
@jit
@core._add_math_1arg_docstr("sigmoid")
def sigmoid(x):
    # 1 / (1 + e^-x), written directly in the elementwise form the triton
    # tracer maps onto exp/div ops.
    return 1 / (1 + core.exp(-x))
@jit
@core._add_math_1arg_docstr("softmax")
def softmax(x, ieee_rounding=False):
    # Shift by the axis-0 max for numerical stability: exp of large values
    # would overflow, and the shift cancels in the num/den ratio.
    z = x - core.max(x, 0)
    num = core.exp(z)
    den = core.sum(num, 0)
    # fdiv lets the caller opt into IEEE-compliant rounding for the divide.
    return core.fdiv(num, den, ieee_rounding)
@jit
def ravel(x):
    """
    Returns a contiguous flattened view of :code:`x`.

    :param x: the input tensor
    :type x: Block
    """
    # A view with a single dimension of x.numel elements == flatten.
    return core.view(x, [x.numel])
@jit
def swizzle2d(i, j, size_i, size_j, size_g):
    """
    Transforms indices of a row-major size_i*size_j matrix into those
    of one where indices are row major for each group of size_j rows.

    For example, for size_i = size_j = 4 and size_g = 2, it will transform
    [[0 , 1 , 2 , 3 ],
     [4 , 5 , 6 , 7 ],
     [8 , 9 , 10, 11],
     [12, 13, 14, 15]]
    into
    [[0, 2,  4 , 6 ],
     [1, 3,  5 , 7 ],
     [8, 10, 12, 14],
     [9, 11, 13, 15]]
    """
    # "unrolled index in array"
    ij = i * size_j + j
    # number of elements in `size_g` groups
    # of `size_j` columns
    size_gj = size_g * size_j
    # index of the group in which (i,j) is
    group_id = ij // size_gj
    # row-index of the first element of this group
    off_i = group_id * size_g
    # last group may have fewer rows
    size_g = core.minimum(size_i - off_i, size_g)
    # new row and column indices: walk the group column-major so that
    # consecutive ij values land in the same group of rows.
    new_i = off_i + (ij % size_g)
    new_j = (ij % size_gj) // size_g
    return new_i, new_j
@jit
def zeros(shape, dtype):
    """
    Returns a tensor filled with the scalar value 0 for the given :code:`shape` and :code:`dtype`.

    :param shape: Shape of the new array, e.g., (8, 16) or (8, )
    :type shape: tuple of ints
    :param dtype: Data-type of the new array, e.g., :code:`tl.float16`
    :type dtype: DType
    """
    # Thin wrapper over core.full with a zero fill value.
    return core.full(shape, 0, dtype)
@jit
def zeros_like(input):
    """Returns a zero-filled tensor with the same shape and dtype as :code:`input`."""
    return zeros(input.shape, input.dtype)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,507
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/base/doc/filters.py
|
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import re
import pprint as pp
from .log import Logger
from .base import config
from .resource import Resource
from .date import Date # needed for repr reading
# Module-wide logger.  Messages below use '{}' placeholders (see
# AdrFilter.probe), so Logger presumably does str.format-style
# interpolation -- confirm against .log.Logger.
log = Logger(__name__)
class AdrFilter:
    """Address filter backed by explicit include/exclude tables.

    Each table maps an entry (full address, domain, or local part) to a
    "hit" flag: entries seeded from config start as False and flip to True
    once they match a probed address, so actual usage can be inspected via
    __repr__ later.
    """

    def __init__(self, incl=(), doms=(), locs=(), fulls=()):
        super().__init__()

        def init(ds, ss):
            # Seed from config defaults plus explicit entries; only the
            # explicitly supplied ones start out as already-hit.
            return {k: k in ss for k in set((*ds, *ss))}

        self.incl = init(config.include_adrs, incl)
        self.doms = init(config.exclude_doms, doms)
        self.locs = init(config.exclude_locs, locs)
        self.fulls = init(config.exclude_fulls, fulls)

    def __repr__(self):
        s = '{}('.format(type(self).__name__)

        def keys(es):
            # Only report entries that actually matched something.
            return pp.pformat(tuple(sorted(k for k, v in es if v)), indent=4)

        s += '{}, '.format(keys(self.incl.items()))
        s += '{}, '.format(keys(self.doms.items()))
        s += '{}, '.format(keys(self.locs.items()))
        s += '{})'.format(keys(self.fulls.items()))
        return s

    def probe(self, adr):
        """Classify adr: True = include, False = exclude, None = no opinion.

        Matching order: full include, full exclude, then domain/local-part
        includes, then domain/local-part excludes.
        """
        if adr in self.incl:
            self.incl[adr] = True
            return True
        if adr in self.fulls:
            self.fulls[adr] = True
            return False
        ps = adr.split('@')
        if len(ps) == 2:
            l, ad = ps
            ds = ad.split('.')
            # Guard single-label domains (e.g. 'user@localhost'): the
            # previous ds[-2] access raised IndexError for them.
            d2 = ds[-2] + '.' + ds[-1] if len(ds) > 1 else ad
            for d in (d2, ad):
                if d in self.incl:
                    self.incl[d] = True
                    return True
            if l in self.incl:
                self.incl[l] = True
                return True
            for d in ('.' + ds[-1], d2, ad):
                if d in self.doms:
                    self.doms[d] = True
                    return False
            if l in self.locs:
                self.locs[l] = True
                return False
        else:
            log.info('Invalid address {}', adr)
class RAdrFilter:
    """Regex-based address filter: a match at the start means exclude."""

    def __init__(self, spec, **_):
        super().__init__()
        self.spec = spec
        # ASCII-only matching; compiled once up front.
        self._cspec = re.compile(spec, re.ASCII)

    def __repr__(self):
        return '{}({!r})'.format(type(self).__name__, self.spec)

    def probe(self, adr):
        # False = excluded; implicit None = no opinion on this address.
        m = self._cspec.match(adr)
        if m is not None:
            return False
class Filters(Resource):
    """Composite address filter persisted as a Resource.

    Behaves like a mapping from lowercased regex specs to RAdrFilter
    instances, combined with a plain AdrFilter (``self.simple``) for
    exact/domain/local-part matching.  Probing records decided addresses
    in a lazily-created FAdrs counter.
    """

    # Path of the persisted filter resource under the qnar destination.
    _res_path = config.qnar_dst + 'filts/filters.qnr'
    # Lazily created companion resources (see the flog/adrs properties).
    _flog = None
    _adrs = None

    @classmethod
    def globals(cls):
        # Namespace used by Resource when evaluating stored reprs
        # (presumably -- see the "needed for repr reading" Date import
        # at the top of this file).
        return globals()

    def __init__(self, specs=(), simple=None, **kw):
        super().__init__(**kw)
        # Fall back to the configured exclusion regexes when none given.
        self.extend(specs or config.exclude_specs)
        self.simple = simple or AdrFilter()

    def __repr__(self):
        s = '{}('.format(type(self).__name__)
        s += '{!r}, '.format(tuple(sorted(self.keys())))
        s += '{})'.format(pp.pformat(self.simple, indent=4))
        return s

    @property
    def flog(self):
        # Lazily create (and reset) the filtering-decision log.
        if self._flog is None:
            self._flog = Flog.create(self.base, self.realm)
            self._flog.clear()
        return self._flog

    @property
    def adrs(self):
        # Lazily create (and reset) the per-address hit counters.
        if self._adrs is None:
            self._adrs = FAdrs.create(self.base, self.realm)
            self._adrs.clear()
        return self._adrs

    def extend(self, specs):
        # Register one regex filter per spec, keyed by its lowercased text.
        for s in specs:
            s = s.lower()
            self[s] = RAdrFilter(s)

    def probe(self, adr):
        # Ask the simple filter first; None means "no opinion".
        r = self.simple.probe(adr)
        if r is None:
            for f in self.values():
                r = f.probe(adr)
                if r is not None:
                    return r
            else:
                # for-else: loop finished without a decision, so every
                # regex filter returned None -- propagate None.
                return r
        # Reached only when the simple filter decided (r is not None):
        # the address is counted, then the method falls off the end.
        # NOTE(review): the decision r is discarded here -- probe returns
        # None for simple-filter hits; confirm callers expect that rather
        # than a final `return r`.
        self.adrs.incr(adr)

    def save(self, pref=None):
        super().save(pref)
        # Persist companions only if they were ever instantiated.
        if self._flog:
            self._flog.save(pref)
        if self._adrs:
            self._adrs.save(pref)
class Flog(Resource):
    """Persisted log of filtering decisions, keyed by a "current" item."""

    _res_path = config.qnar_dst + 'filts/flog.qnr'

    @classmethod
    def globals(cls):
        # Namespace handed back to Resource, presumably for repr reading.
        return globals()

    def __repr__(self):
        return '{}({})'.format(
            type(self).__name__, pp.pformat(self._elems, indent=4))

    def append(self, cur, fields):
        # Accumulate field tuples under their key, creating the list lazily.
        self.setdefault(cur, []).append(fields)
class FAdrs(Resource):
    """Persisted per-address hit counters."""

    _res_path = config.qnar_dst + 'filts/fadrs.qnr'

    @classmethod
    def globals(cls):
        # Namespace handed back to Resource, presumably for repr reading.
        return globals()

    def __repr__(self):
        return '{}({})'.format(
            type(self).__name__, pp.pformat(self._elems, indent=4))

    def incr(self, adr):
        # Bump the counter for adr, starting it at zero if unseen.
        self[adr] = self.setdefault(adr, 0) + 1

    def splits(self):
        """Aggregate counts into (by-domain, by-local-part) dicts.

        Domains are collapsed to their last two labels, e.g.
        'mail.example.com' -> 'example.com'.
        """
        by_dom = {}
        by_loc = {}
        for adr, cnt in self.items():
            loc, dom = adr.split('@')
            parts = dom.split('.')
            dom = parts[-2] + '.' + parts[-1]
            by_loc[loc] = by_loc.get(loc, 0) + cnt
            by_dom[dom] = by_dom.get(dom, 0) + cnt
        return by_dom, by_loc
if __name__ == '__main__':
    # Ad-hoc report: load the FAdrs resource named on the command line and
    # print its domain and local-part keys as quoted, comma-terminated
    # lines (handy for pasting into config exclude lists).
    from .args import MArgs
    from .resource import resource
    a = MArgs()
    a = a.parse_args()
    with resource(FAdrs.create(a.base, a.files[0])) as fa:
        ds, ls = fa.splits()
        # NOTE(review): the counts (n) are unpacked but never printed, and
        # key=lambda x: x[0] with reverse=False is just the default sort
        # order -- confirm whether sorting by count was intended.
        for d, n in sorted(ds.items(), key=lambda x: x[0], reverse=False):
            print("'" + d + "',")
        for l, n in sorted(ls.items(), key=lambda x: x[0], reverse=False):
            print("'" + l + "',")
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,508
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/convert/gpt_neo.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import json
import re
import tensorflow as tf
import torch
from argparse import ArgumentParser
from os.path import abspath
from transformers.utils import logging
from ..config.gpt_neo import PreTrained
from ...models.gpt_neo import ForCausal
# Emit INFO-level progress messages during conversion.
logging.set_verbosity_info()
log = logging.get_logger(__name__)
def load_src_weights(model, config, gpt_neo_checkpoint_path):
    """Load a TensorFlow GPT-Neo checkpoint's weights into *model*.

    Reads every variable from the checkpoint, renames it to the PyTorch
    module layout, copies the data into the matching parameters, and
    finally ties the output embedding to the input embedding weights.

    Args:
        model: target PyTorch model; must expose ``model.transformer`` and
            ``set_output_embeddings``.
        config: model configuration; only ``config.s_vocab`` is read, to
            truncate the token-embedding table.
        gpt_neo_checkpoint_path: path to the TF checkpoint.

    Returns:
        The same ``model`` instance, with weights loaded.

    Raises:
        ValueError: when a checkpoint tensor's shape does not match the
            corresponding PyTorch parameter's shape.
    """
    tf_path = abspath(gpt_neo_checkpoint_path)
    log.info(f"Converting TensorFlow checkpoint from {tf_path}")
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        # Skip optimizer state ("adam") and the global step counter.
        if "global_step" not in name and "adam" not in name:
            array = tf.train.load_variable(tf_path, name)
            # Drop singleton dims and force float32 before copying.
            array = tf.dtypes.cast(array.squeeze(), tf.float32).numpy()
            # Rewrite TF variable names into the PyTorch module hierarchy.
            name = name.replace("attn/q", "attn/attention/q_proj/w")
            name = name.replace("attn/k", "attn/attention/k_proj/w")
            name = name.replace("attn/v", "attn/attention/v_proj/w")
            name = name.replace("attn/o", "attn/attention/out_proj/w")
            name = name.replace("norm_1", "ln_1")
            name = name.replace("norm_2", "ln_2")
            name = name.replace("attn/compute_output_bias/o_b", "attn/attention/out_proj/b")
            name = name.replace("conv1d_main/c_fc/kernel", "c_fc/w")
            name = name.replace("conv1d_main/c_fc/bias", "c_fc/b")
            name = name.replace("conv1d_main/c_proj/kernel", "c_proj/w")
            name = name.replace("conv1d_main/c_proj/bias", "c_proj/b")
            names.append(name)
            arrays.append(array)
    for name, array in zip(names, arrays):
        name = name[5:]  # skip "gpt2/"
        name = name.split("/")
        # Walk the module tree segment by segment to the target parameter.
        pointer = model.transformer
        for m_name in name:
            # Split a trailing layer index, e.g. "h11" -> ["h", "11", ""].
            if re.fullmatch(r"[A-Za-z]+\d+", m_name):
                scope_names = re.split(r"(\d+)", m_name)
            else:
                scope_names = [m_name]
            # "w"/"g" -> .weight, "b" -> .bias; embedding tables ("wpe",
            # "wte") resolve to the submodule's .weight.
            if scope_names[0] == "w" or scope_names[0] == "g":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "b":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "wpe" or scope_names[0] == "wte":
                pointer = getattr(pointer, scope_names[0])
                pointer = getattr(pointer, "weight")
            else:
                pointer = getattr(pointer, scope_names[0])
            if len(scope_names) >= 2:
                # Index into a module list (e.g. transformer.h[11]).
                num = int(scope_names[1])
                pointer = pointer[num]
        # TF stores linear kernels transposed relative to torch.nn.Linear.
        if name[-1] == "w" and name[-2] in [
            "out_proj",
            "k_proj",
            "q_proj",
            "v_proj",
            "c_proj",
            "c_fc",
        ]:
            array = array.transpose()
        if name == ["wte"]:
            # Truncate padded embedding rows to the configured vocab size.
            array = array[: config.s_vocab]
        if pointer.shape != array.shape:
            raise ValueError(
                f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched {name}"
            )
        print(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    # Tie the LM head to the (possibly truncated) input embedding matrix.
    embs = model.transformer.wte.weight
    lin = torch.nn.Linear(embs.size()[1], embs.size()[0], bias=False)
    lin.weight = embs
    model.set_output_embeddings(lin)
    return model
def to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TF GPT-Neo checkpoint to a PyTorch model and save it.

    Args:
        tf_checkpoint_path: path to the source TensorFlow checkpoint.
        config_file: path to the JSON config describing the architecture.
        pytorch_dump_path: directory to write the converted model to.
    """
    # Close the config file deterministically (the original leaked the
    # handle via json.load(open(...))).
    with open(config_file, "r") as f:
        config_json = json.load(f)
    # Fix: the original referenced the undefined name `GPTNeoConfig`;
    # this module imports the config class as `PreTrained` from
    # ..config.gpt_neo (matching the other converters in this package).
    cfg = PreTrained(
        d_hidden=config_json["n_embd"],
        n_lays=config_json["n_lays"],
        n_heads=config_json["n_heads"],
        attention_types=config_json["attention_types"],
        n_pos=config_json["n_pos"],
        drop_resid=config_json["res_dropout"],
        drop_embed=config_json["drop_embed"],
        drop_attn=config_json["attn_dropout"],
    )
    print(f"Building from config: {cfg}")
    m = ForCausal(cfg)
    load_src_weights(m, cfg, tf_checkpoint_path)
    print(f"Saving to: {pytorch_dump_path}")
    m.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
x = ArgumentParser()
x.add_argument("--src_path", default=None, type=str, required=True)
x.add_argument("--cfg_path", default=None, type=str, required=True)
x.add_argument("--save_path", default=None, type=str, required=True)
y = x.parse_args()
to_pytorch(y.src_path, y.cfg_path, y.save_path)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,509
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/try/attention.py
|
import flash_attn_cuda
import math
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
import triton
import triton.language as tl
from einops import rearrange, repeat
@triton.jit
def _fwd_kernel(
    Q,
    K,
    V,
    sm_scale,
    # TMP,
    L,
    M,
    Y,
    Z,
    H,
    N_CTX,
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
    BLOCK_K: tl.constexpr,
):
    # Fused causal-attention forward (Flash-Attention style online softmax).
    # Each program handles one BLOCK_M x BLOCK_K tile of queries for one
    # (batch, head) slice and streams over key/value blocks of size BLOCK_N.
    # Outputs: Y (attention output), L (softmax denominators), M (row maxima).
    start = tl.program_id(0)
    off = tl.program_id(1)
    offs_d = tl.arange(0, BLOCK_K)
    offs_m = start * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = tl.arange(0, BLOCK_N)
    _, s_qh, s_qm, s_qk = Q.stride()
    _, _, s_kn, s_kk = K.stride()
    _, _, s_vk, _ = V.stride()
    _, s_yh, s_ym, s_yn = Y.stride()
    q = tl.load(Q + off * s_qh + offs_m[:, None] * s_qm + offs_d[None, :] * s_qk)
    ks = K + off * s_qh + offs_n[None, :] * s_kn + offs_d[:, None] * s_kk
    # NOTE(review): V is addressed with Q's strides (s_qm/s_qk); this only
    # works because q, k, v share one contiguous layout -- confirm.
    vs = V + off * s_qh + offs_n[:, None] * s_qm + offs_d[None, :] * s_qk
    l = tl.zeros([BLOCK_M], dtype=tl.float32)  # running softmax denominator
    m = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")  # running row max
    y = tl.zeros([BLOCK_M, BLOCK_K], dtype=tl.float32)  # output accumulator
    # Causal: only key blocks at or before this query block contribute.
    for i in range(0, (start + 1) * BLOCK_M, BLOCK_N):
        k = tl.load(ks + i * s_kn)
        qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
        qk += tl.dot(q, k)
        qk *= sm_scale
        # Mask out future positions within the block.
        qk = tl.where(offs_m[:, None] >= (i + offs_n[None, :]), qk, float("-inf"))
        # Online-softmax update of the running statistics.
        m2 = tl.maximum(tl.max(qk, 1), m)
        l *= tl.exp(m - m2)
        p = tl.exp(qk - m2[:, None])
        l2 = tl.sum(p, 1) + l
        l3 = 1.0 / l2
        p *= l3[:, None]
        y *= (l * l3)[:, None]
        v = tl.load(vs + i * s_vk)
        p = p.to(Q.dtype.element_ty)
        y += tl.dot(p, v)
        l = l2
        m = m2
        # BUG FIX: a second, duplicated online-softmax update (the TMP-buffer
        # variant of the same step) followed here; it re-applied the update a
        # second time per iteration and referenced the undefined pointer `ts`
        # (its TMP argument is commented out above), so it has been removed.
    tl.store(L + off * N_CTX + offs_m, l)
    tl.store(M + off * N_CTX + offs_m, m)
    tl.store(Y + off * s_yh + offs_m[:, None] * s_ym + offs_d[None, :] * s_yn, y)
@triton.jit
def _bwd_prep(
    Y,
    DY,
    L,
    NewDY,
    Delta,
    BLOCK_M: tl.constexpr,
    D_HEAD: tl.constexpr,
):
    # Backward preprocessing: rescale the incoming gradient by the softmax
    # denominator and compute delta[r] = sum_d(y[r, d] * dy[r, d]) per row.
    rows = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
    cols = tl.arange(0, D_HEAD)
    idx = rows[:, None] * D_HEAD + cols[None, :]
    y = tl.load(Y + idx).to(tl.float32)
    dy = tl.load(DY + idx).to(tl.float32)
    denom = tl.load(L + rows).to(tl.float32)
    scaled = dy / denom[:, None]
    tl.store(NewDY + idx, scaled)
    tl.store(Delta + rows, tl.sum(y * scaled, axis=1))
@triton.jit
def _bwd_kernel(
    Q,
    K,
    V,
    sm_scale,
    Y,
    DY,
    DQ,
    DK,
    DV,
    L,
    M,
    D,
    Z,
    H,
    N_CTX,
    num_block,
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
    BLOCK_K: tl.constexpr,
):
    # Fused causal-attention backward: one program per (batch, head) slice.
    # The outer loop walks key/value blocks; the inner loop walks the query
    # blocks that attend to them (causal, so only rows >= the k/v block).
    o_zh = tl.program_id(0)
    o_z = o_zh // H
    o_h = o_zh % H
    s_qz, s_qh, s_qm, s_qk = Q.stride()
    _, _, s_kn, s_kk = K.stride()
    off = o_z * s_qz + o_h * s_qh
    offs_k = tl.arange(0, BLOCK_K)
    for blk in range(0, num_block):
        i = blk * BLOCK_M
        offs_m = i + tl.arange(0, BLOCK_M)
        offs_n = i + tl.arange(0, BLOCK_M)
        qs = Q + off + (offs_m[:, None] * s_qm + offs_k[None, :] * s_qk)
        ks = K + off + (offs_n[:, None] * s_kn + offs_k[None, :] * s_kk)
        # NOTE(review): V/DQ/DY/DV are addressed with Q's strides; valid only
        # while all tensors share one contiguous layout -- confirm.
        vs = V + off + (offs_n[:, None] * s_qm + offs_k[None, :] * s_qk)
        dqs = DQ + off + (offs_m[:, None] * s_qm + offs_k[None, :] * s_qk)
        dys = DY + off + (offs_m[:, None] * s_qm + offs_k[None, :] * s_qk)
        deltas = D + o_zh * N_CTX
        ms = M + o_zh * N_CTX
        dv = tl.zeros([BLOCK_M, BLOCK_K], dtype=tl.float32)
        dk = tl.zeros([BLOCK_M, BLOCK_K], dtype=tl.float32)
        k = tl.load(ks)
        v = tl.load(vs)
        for j in range(i, num_block * BLOCK_M, BLOCK_M):
            j += tl.arange(0, BLOCK_N)  # absolute row indices of this query block
            q = tl.load(qs)
            qk = tl.dot(q, tl.trans(k))
            qk = tl.where(j[:, None] >= (offs_n[None, :]), qk, float("-inf"))
            m = tl.load(ms + j)
            # Recompute the softmax numerator from the saved row maxima.
            p = tl.exp(qk * sm_scale - m[:, None])
            dy = tl.load(dys)
            dv += tl.dot(tl.trans(p.to(Q.dtype.element_ty)), dy)
            dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - tl.load(deltas + j)[:, None]
            dp += tl.dot(dy, tl.trans(v))
            # BUG FIX: this tensor was previously assigned to `ds`, clobbering
            # the pointer into D (here renamed `deltas`) that the next inner
            # iteration loads from.
            dscores = p * dp * sm_scale
            dk += tl.dot(tl.trans(dscores.to(Q.dtype.element_ty)), q)
            dq = tl.load(dqs)
            dq += tl.dot(dscores.to(Q.dtype.element_ty), k)
            tl.store(dqs, dq)
            qs += BLOCK_M * s_qm
            dqs += BLOCK_M * s_qm
            dys += BLOCK_M * s_qm
        tl.store(DK + off + (offs_n[:, None] * s_kn + offs_k[None, :] * s_kk), dk)
        tl.store(DV + off + (offs_n[:, None] * s_qm + offs_k[None, :] * s_qk), dv)
# NOTE(review): module-level CUDA allocation that runs at import time and is
# not referenced anywhere in this file -- confirm it isn't a deliberate
# CUDA-context warm-up before removing.
empty = torch.empty(128, device="cuda")
class _attention(torch.autograd.Function):
    """Autograd wrapper launching the Triton fused causal-attention kernels.

    forward computes y = softmax(q @ k^T * sm_scale, causal) @ v for
    (batch, heads, seq, head_dim) inputs; backward recomputes attention
    probabilities from the saved log-statistics instead of storing them.
    """
    @staticmethod
    def forward(ctx, q, k, v, sm_scale):
        # Requires compute capability >= 8.0 (Ampere or newer).
        assert torch.cuda.get_device_capability()[0] > 7
        BLOCK = 128
        Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
        assert Lq == Lk and Lk == Lv
        assert Lk in {16, 32, 64, 128}
        y = torch.empty_like(q)
        # Grid: one program per query block per (batch * head) slice.
        grid = (triton.cdiv(q.shape[2], BLOCK), q.shape[0] * q.shape[1], 1)
        # Per-row softmax denominators (L) and maxima (m), saved for backward.
        L = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
        m = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
        num_warps = 4 if Lk <= 64 else 8
        # NOTE(review): tmp is allocated but not passed to the kernel (the TMP
        # argument is commented out below) -- appears vestigial.
        tmp = torch.empty(
            (q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32
        )
        _fwd_kernel[grid](
            q,
            k,
            v,
            sm_scale,
            # tmp,
            L,
            m,
            y,
            q.shape[0],
            q.shape[1],
            q.shape[2],
            BLOCK_M=BLOCK,
            BLOCK_N=BLOCK,
            BLOCK_K=Lk,
            num_warps=num_warps,
            num_stages=2,  # =1,
        )
        ctx.save_for_backward(q, k, v, y, L, m)
        ctx.grid = grid
        ctx.sm_scale = sm_scale
        ctx.BLOCK_M = BLOCK
        ctx.BLOCK_N = BLOCK
        ctx.BLOCK_K = Lk
        return y
    @staticmethod
    def backward(ctx, dy):
        q, k, v, y, l, m = ctx.saved_tensors
        # dq is accumulated across key/value blocks, hence zeros (fp32).
        dq = torch.zeros_like(q, dtype=torch.float32)
        dk = torch.empty_like(k)
        dv = torch.empty_like(v)
        dy = dy.contiguous()
        dy_scaled = torch.empty_like(dy)
        delta = torch.empty_like(l)
        # Stage 1: rescale dy by the softmax denominator, compute delta.
        _bwd_prep[(ctx.grid[0] * ctx.grid[1],)](
            y,
            dy,
            l,
            dy_scaled,
            delta,
            BLOCK_M=ctx.BLOCK_M,
            D_HEAD=ctx.BLOCK_K,
        )
        # Stage 2: one program per (batch * head) computes dq, dk, dv.
        _bwd_kernel[(ctx.grid[1],)](
            q,
            k,
            v,
            ctx.sm_scale,
            y,
            dy_scaled,
            dq,
            dk,
            dv,
            l,
            m,
            delta,
            q.shape[0],
            q.shape[1],
            q.shape[2],
            ctx.grid[0],
            BLOCK_M=ctx.BLOCK_M,
            BLOCK_N=ctx.BLOCK_N,
            BLOCK_K=ctx.BLOCK_K,
            num_warps=8,
            num_stages=1,
        )
        return dq, dk, dv, None  # dq.to(q.dtype),
# Public functional entry point: attention(q, k, v, sm_scale).
attention = _attention.apply
@pytest.mark.parametrize("Z, H, N_CTX, D_HEAD", [(4, 48, 1024, 64)])
def test_op(Z, H, N_CTX, D_HEAD, dtype=torch.float16):
    """Compare the Triton fused attention (forward and backward) against a
    plain PyTorch causal-softmax reference on random fp16 inputs."""
    torch.manual_seed(20)
    q = (
        torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda")
        .normal_(mean=0.1, std=0.2)
        .requires_grad_()
    )
    k = (
        torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda")
        .normal_(mean=0.4, std=0.2)
        .requires_grad_()
    )
    v = (
        torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda")
        .normal_(mean=0.3, std=0.2)
        .requires_grad_()
    )
    sm_scale = 0.2
    dy = torch.randn_like(q)
    # Reference: softmax(QK^T * scale) masked by a lower-triangular causal mask.
    M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
    p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
    # FIX: the original wrapped this assignment in `for z in range(Z)` /
    # `for h in range(H)` loops, repeating the identical batch-independent
    # masking Z * H times; the mask indexes only the last two dims, so one
    # broadcast assignment covers every (z, h) slice.
    p[:, :, M == 0] = float("-inf")
    p = torch.softmax(p.float(), dim=-1).half()
    y_ref = torch.matmul(p, v)
    y_ref.backward(dy)
    dv_ref, v.grad = v.grad.clone(), None
    dk_ref, k.grad = k.grad.clone(), None
    dq_ref, q.grad = q.grad.clone(), None
    y_triton = attention(q, k, v, sm_scale)
    y_triton.backward(dy)
    dv_triton, v.grad = v.grad.clone(), None
    dk_triton, k.grad = k.grad.clone(), None
    dq_triton, q.grad = q.grad.clone(), None
    assert torch.allclose(y_ref, y_triton, atol=1e-2, rtol=0)
    assert torch.allclose(dv_ref, dv_triton, atol=1e-2, rtol=0)
    assert torch.allclose(dk_ref, dk_triton, atol=1e-2, rtol=0)
    assert torch.allclose(dq_ref, dq_triton, atol=1e-2, rtol=0)
# Benchmark problem size: batch, attention heads, max context length, head dim.
BATCH, N_HEADS, N_CTX, D_HEAD = 4, 48, 4096, 64
@triton.testing.perf_report(
    [
        triton.testing.Benchmark(
            x_names=["N_CTX"],
            x_vals=[2**i for i in range(10, 14)],
            line_arg="provider",
            line_vals=["triton", "flash"],
            line_names=["Triton", "Flash"],
            styles=[("red", "-"), ("blue", "-")],
            ylabel="ms",
            plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{D_HEAD}-{mode}",
            args={
                "H": N_HEADS,
                "BATCH": BATCH,
                "D_HEAD": D_HEAD,
                "dtype": torch.float16,
                "mode": mode,
            },
        )
        for mode in ["fwd", "bwd"]
    ]
)
def benchmark(BATCH, H, N_CTX, D_HEAD, mode, provider, dtype=torch.float16, device="cuda"):
    """Benchmark the Triton fused attention against the flash-attn CUDA path.

    Returns the measured time in milliseconds for the selected provider and
    mode ("fwd" or "bwd").
    """
    assert mode in ["fwd", "bwd"]
    warmup = 25
    rep = 100
    if provider == "triton":
        q = torch.randn((BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
        k = torch.randn((BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
        v = torch.randn((BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
        sm_scale = 1.3
        f = lambda: attention(q, k, v, sm_scale)
        if mode == "bwd":
            y = f()
            dy = torch.randn_like(y)
            f = lambda: y.backward(dy, retain_graph=True)
    else:
        assert provider == "flash"
        lengths = torch.full((BATCH,), fill_value=N_CTX, device=device)
        cu_seqlens = torch.zeros((BATCH + 1,), device=device, dtype=torch.int32)
        cu_seqlens[1:] = lengths.cumsum(0)
        qkv = torch.randn(
            (BATCH * N_CTX, 3, H, D_HEAD), dtype=dtype, device=device, requires_grad=True
        )
        # BUG FIX: the original called `flash_attn_func`, which is undefined in
        # this module; use the qkv-packed entry point defined below, whose
        # signature is (qkv, cu_seqlens, max_seqlen, dropout_p, ...).
        f = lambda: flash_attn_unpadded_qkvpacked_func(qkv, cu_seqlens, N_CTX, 0.0, causal=True)
        if mode == "bwd":
            y = f()
            dy = torch.randn_like(y)
            f = lambda: y.backward(dy, retain_graph=True)
    ms = triton.testing.do_bench(f, warmup=warmup, rep=rep)
    return ms


# only works on post-Ampere GPUs right now
# NOTE(review): runs at import time; consider guarding with __name__ == "__main__".
benchmark.run(save_path=".", print_data=True)
class IndexFirstAxis(torch.autograd.Function):
    """Gather rows along the first axis with an explicit scatter backward.

    ``index_first_axis(x, indices)`` returns ``x[indices]`` for ``x`` of shape
    ``(n, *rest)``; the backward pass scatters gradients back to the selected
    rows (unselected rows receive zero gradient).

    IMPROVEMENT: the einops ``rearrange``/``repeat`` calls were replaced with
    equivalent native ``reshape``/``repeat`` tensor ops, keeping the class
    self-contained; behavior is unchanged.
    """

    @staticmethod
    def forward(ctx, x, indices):
        ctx.save_for_backward(indices)
        assert x.ndim >= 2
        ctx.first_axis_dim, s = x.shape[0], x.shape[1:]
        # Flatten trailing dims so one gather along dim 0 selects whole rows.
        flat = x.reshape(x.shape[0], -1)
        idx = indices.unsqueeze(1).repeat(1, flat.shape[1])
        return torch.gather(flat, 0, idx).reshape(-1, *s)

    @staticmethod
    def backward(ctx, grad_out):
        (indices,) = ctx.saved_tensors
        assert grad_out.ndim >= 2
        s = grad_out.shape[1:]
        flat_grad = grad_out.reshape(grad_out.shape[0], -1)
        grad_x = torch.zeros(
            [ctx.first_axis_dim, flat_grad.shape[1]],
            device=grad_out.device,
            dtype=grad_out.dtype,
        )
        # scatter_ (not scatter_add_): duplicate indices overwrite, matching
        # the original behavior.
        grad_x.scatter_(0, indices.unsqueeze(1).repeat(1, flat_grad.shape[1]), flat_grad)
        return grad_x.reshape(ctx.first_axis_dim, *s), None


index_first_axis = IndexFirstAxis.apply
class IndexPutFirstAxis(torch.autograd.Function):
    """Scatter rows of ``x`` into a zero tensor with ``first_axis_dim`` rows.

    ``index_put_first_axis(x, indices, n)`` builds ``out`` of shape
    ``(n, *x.shape[1:])`` with ``out[indices] = x``; the backward pass
    gathers the selected rows back out.
    """

    @staticmethod
    def forward(ctx, x, indices, first_axis_dim):
        ctx.save_for_backward(indices)
        assert indices.ndim == 1
        assert x.ndim >= 2
        out = torch.zeros(first_axis_dim, *x.shape[1:], device=x.device, dtype=x.dtype)
        out[indices] = x
        return out

    @staticmethod
    def backward(ctx, grad_out):
        (indices,) = ctx.saved_tensors
        # Only non-tensor inputs remain, hence the trailing Nones.
        return grad_out[indices], None, None


index_put_first_axis = IndexPutFirstAxis.apply
class IndexFirstAxisResidual(torch.autograd.Function):
    """Gather rows along the first axis, also returning the full (detached) input.

    Returns ``(x[indices], x.detach())``; the backward pass accumulates the
    gathered gradient into the residual gradient via ``scatter_add_``.

    FIX: removed the unused local ``second_dim`` from forward.
    """

    @staticmethod
    def forward(ctx, x, indices):
        ctx.save_for_backward(indices)
        assert x.ndim >= 2
        ctx.first_axis_dim, s = x.shape[0], x.shape[1:]
        y = x[indices]
        # Detach the residual so autograd routes its gradient only through
        # the explicit grad_residual argument of backward.
        return y, x.detach()

    @staticmethod
    def backward(ctx, grad_y, grad_residual):
        (indices,) = ctx.saved_tensors
        assert grad_y.ndim >= 2
        s = grad_y.shape[1:]
        assert grad_residual.shape[1:] == s
        # Reuse grad_residual as the accumulator (in-place scatter_add_).
        out = grad_residual
        idx = indices.reshape(indices.shape[0], *((1,) * (grad_y.ndim - 1)))
        idx = idx.expand_as(grad_y)
        out.scatter_add_(0, idx, grad_y)
        return out.reshape(ctx.first_axis_dim, *s), None


index_first_axis_residual = IndexFirstAxisResidual.apply
def unpad_input(x, mask):
    """Drop padded positions from a ``(batch, seqlen, ...)`` tensor.

    Returns the packed tokens, their flat indices, the cumulative sequence
    lengths (with a leading 0), and the longest sequence length in the batch.
    """
    lengths = mask.sum(dim=-1, dtype=torch.int32)
    flat_idx = torch.nonzero(mask.flatten(), as_tuple=False).flatten()
    longest = lengths.max().item()
    cu_lens = F.pad(torch.cumsum(lengths, dim=0, dtype=torch.int32), (1, 0))
    packed = index_first_axis(rearrange(x, "b s ... -> (b s) ..."), flat_idx)
    return packed, flat_idx, cu_lens, longest
def pad_input(x, indices, batch, seqlen):
    """Inverse of unpad_input: scatter packed rows back into a padded
    ``(batch, seqlen, ...)`` tensor, zero-filling the padding positions.

    FIX: removed the unused local ``dim`` and the commented-out manual
    scatter that index_put_first_axis already implements.
    """
    padded = index_put_first_axis(x, indices, batch * seqlen)
    return rearrange(padded, "(b s) ... -> b s ...", b=batch)
def _get_block_size(device, head_dim, is_dropout):
assert head_dim % 8 == 0 and head_dim <= 128
return 256 if head_dim <= 64 else 128
def _flash_attn_forward(
    q,
    k,
    v,
    out,
    cu_seqlens_q,
    cu_seqlens_k,
    max_seqlen_q,
    max_seqlen_k,
    dropout_p,
    softmax_scale,
    causal,
    return_softmax,
    num_splits=0,
    generator=None,
):
    """Thin wrapper over the flash_attn_cuda forward kernel.

    Writes the attention result into *out* (also returned) and additionally
    returns the softmax log-sum-exp, the RNG state used for dropout, and --
    only when *return_softmax* is set -- the softmax/dropout mask.

    NOTE(review): the literal ``False`` forwarded below presumably toggles a
    kernel option (position suggests zero_tensors) -- confirm against the
    extension's signature.
    """
    softmax_lse, rng_state, *rest = flash_attn_cuda.fwd(
        q,
        k,
        v,
        out,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        softmax_scale,
        False,
        causal,
        return_softmax,
        num_splits,
        generator,
    )
    # if out.isnan().any() or softmax_lse.isnan().any():
    #     breakpoint()
    S_dmask = rest[0] if return_softmax else None
    return out, softmax_lse, rng_state, S_dmask
def _flash_attn_backward(
    dout,
    q,
    k,
    v,
    out,
    softmax_lse,
    dq,
    dk,
    dv,
    cu_seqlens_q,
    cu_seqlens_k,
    max_seqlen_q,
    max_seqlen_k,
    dropout_p,
    softmax_scale,
    causal,
    rng_state=None,
    num_splits=0,
    generator=None,
):
    """Thin wrapper over the flash_attn_cuda backward kernel.

    Gradients are written in place into *dq*, *dk*, *dv* (also returned,
    together with the kernel's softmax_d). *rng_state* replays the dropout
    mask used in the forward pass.
    """
    dout = dout.contiguous()
    _, _, _, softmax_d = flash_attn_cuda.bwd(
        dout,
        q,
        k,
        v,
        out,
        softmax_lse,
        dq,
        dk,
        dv,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        softmax_scale,
        False,
        causal,
        num_splits,
        generator,
        rng_state,
    )
    # NOTE(review): the commented check below tests dk twice; one occurrence
    # was presumably meant to be dq.
    # if dk.isnan().any() or dk.isnan().any() or dv.isnan().any() or softmax_d.isnan().any():
    #     breakpoint()
    return dq, dk, dv, softmax_d
class FlashAttnQKVPackedFunc(torch.autograd.Function):
    """Flash attention over packed qkv input with variable-length sequences.

    ``qkv`` packs queries/keys/values as ``qkv[:, 0..2]``; ``cu_seqlens``
    holds cumulative sequence lengths delimiting the sequences.
    """
    @staticmethod
    def forward(
        ctx,
        qkv,
        cu_seqlens,
        max_seqlen,
        dropout_p,
        softmax_scale,
        causal,
        return_softmax,
        deterministic,
    ):
        # Default scale is 1/sqrt(head_dim).
        if softmax_scale is None:
            softmax_scale = qkv.shape[-1] ** (-0.5)
        out, softmax_lse, rng_state, S_dmask = _flash_attn_forward(
            qkv[:, 0],
            qkv[:, 1],
            qkv[:, 2],
            torch.empty_like(qkv[:, 0]),
            cu_seqlens,
            cu_seqlens,
            max_seqlen,
            max_seqlen,
            dropout_p,
            softmax_scale,
            causal=causal,
            return_softmax=return_softmax,
        )
        # rng_state is saved so backward can replay the same dropout mask.
        ctx.save_for_backward(qkv, out, softmax_lse, cu_seqlens, rng_state)
        ctx.dropout_p = dropout_p
        ctx.max_seqlen = max_seqlen
        ctx.softmax_scale = softmax_scale
        ctx.causal = causal
        ctx.deterministic = deterministic
        return out if not return_softmax else (out, softmax_lse, S_dmask)
    @staticmethod
    def backward(ctx, dout, *args):
        qkv, out, softmax_lse, cu_seqlens, rng_state = ctx.saved_tensors
        dqkv = torch.empty_like(qkv)
        # Gradients are written in place into dqkv's q/k/v slices.
        _flash_attn_backward(
            dout,
            qkv[:, 0],
            qkv[:, 1],
            qkv[:, 2],
            out,
            softmax_lse,
            dqkv[:, 0],
            dqkv[:, 1],
            dqkv[:, 2],
            cu_seqlens,
            cu_seqlens,
            ctx.max_seqlen,
            ctx.max_seqlen,
            ctx.dropout_p,
            ctx.softmax_scale,
            ctx.causal,
            rng_state=rng_state,
            num_splits=1 if ctx.deterministic else 0,
        )
        # One gradient slot per forward input; non-tensor inputs get None.
        return dqkv, None, None, None, None, None, None, None
class FlashAttnKVPackedFunc(torch.autograd.Function):
    """Flash attention with separate queries and packed key/value input.

    ``kv`` packs keys/values as ``kv[:, 0..1]``; query and key/value sides
    may have different cumulative sequence lengths and max lengths.
    """
    @staticmethod
    def forward(
        ctx,
        q,
        kv,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        softmax_scale,
        causal,
        return_softmax,
        deterministic,
    ):
        # Default scale is 1/sqrt(head_dim).
        if softmax_scale is None:
            softmax_scale = q.shape[-1] ** (-0.5)
        out, softmax_lse, rng_state, S_dmask = _flash_attn_forward(
            q,
            kv[:, 0],
            kv[:, 1],
            torch.empty_like(q),
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            dropout_p,
            softmax_scale,
            causal=causal,
            return_softmax=return_softmax,
        )
        # rng_state is saved so backward can replay the same dropout mask.
        ctx.save_for_backward(q, kv, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state)
        ctx.dropout_p = dropout_p
        ctx.max_seqlen_q = max_seqlen_q
        ctx.max_seqlen_k = max_seqlen_k
        ctx.softmax_scale = softmax_scale
        ctx.causal = causal
        ctx.deterministic = deterministic
        return out if not return_softmax else (out, softmax_lse, S_dmask)
    @staticmethod
    def backward(ctx, dout, *args):
        q, kv, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state = ctx.saved_tensors
        dq = torch.empty_like(q)
        dkv = torch.empty_like(kv)
        # Gradients are written in place into dq and dkv's k/v slices.
        _flash_attn_backward(
            dout,
            q,
            kv[:, 0],
            kv[:, 1],
            out,
            softmax_lse,
            dq,
            dkv[:, 0],
            dkv[:, 1],
            cu_seqlens_q,
            cu_seqlens_k,
            ctx.max_seqlen_q,
            ctx.max_seqlen_k,
            ctx.dropout_p,
            ctx.softmax_scale,
            ctx.causal,
            rng_state=rng_state,
            num_splits=1 if ctx.deterministic else 0,
        )
        # One gradient slot per forward input; non-tensor inputs get None.
        return dq, dkv, None, None, None, None, None, None, None, None, None
class FlashAttnFunc(torch.autograd.Function):
    """Flash attention with fully separate q, k, v tensors (unpadded layout)."""
    @staticmethod
    def forward(
        ctx,
        q,
        k,
        v,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        softmax_scale,
        causal,
        return_softmax,
        deterministic,
    ):
        # Default scale is 1/sqrt(head_dim).
        if softmax_scale is None:
            softmax_scale = q.shape[-1] ** (-0.5)
        out, softmax_lse, rng_state, S_dmask = _flash_attn_forward(
            q,
            k,
            v,
            torch.empty_like(q),
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            dropout_p,
            softmax_scale,
            causal=causal,
            return_softmax=return_softmax,
        )
        # rng_state is saved so backward can replay the same dropout mask.
        ctx.save_for_backward(q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state)
        ctx.dropout_p = dropout_p
        ctx.max_seqlen_q = max_seqlen_q
        ctx.max_seqlen_k = max_seqlen_k
        ctx.softmax_scale = softmax_scale
        ctx.causal = causal
        ctx.deterministic = deterministic
        return out if not return_softmax else (out, softmax_lse, S_dmask)
    @staticmethod
    def backward(ctx, dout, *args):
        q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state = ctx.saved_tensors
        dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
        # Gradients are written in place into dq, dk, dv.
        _flash_attn_backward(
            dout,
            q,
            k,
            v,
            out,
            softmax_lse,
            dq,
            dk,
            dv,
            cu_seqlens_q,
            cu_seqlens_k,
            ctx.max_seqlen_q,
            ctx.max_seqlen_k,
            ctx.dropout_p,
            ctx.softmax_scale,
            ctx.causal,
            rng_state=rng_state,
            num_splits=1 if ctx.deterministic else 0,
        )
        # One gradient slot per forward input; non-tensor inputs get None.
        return dq, dk, dv, None, None, None, None, None, None, None, None, None
class FlashAttnQKVPackedSplitFunc(torch.autograd.Function):
    """Flash attention over packed qkv, split into two groups of sequences.

    Sequences ``[0, batch_size0)`` (max length *max_seqlen0*) and the rest
    (max length *max_seqlen1*) are processed by two kernel launches -- the
    second on a side CUDA stream -- writing into one shared output tensor.
    """

    @staticmethod
    def forward(
        ctx,
        qkv,
        cu_seqlens,
        max_seqlen0,
        max_seqlen1,
        batch_size0,
        dropout_p,
        softmax_scale,
        causal,
        return_softmax,
        deterministic,
    ):
        # Save rng_state because the backward pass will regenerate the dropout mask
        if dropout_p > 0:
            rng_state0 = torch.cuda.get_rng_state()
            generator1 = torch.Generator(device="cuda")
            rng_state1 = generator1.get_state()
        else:
            rng_state0, generator1, rng_state1 = None, None, None
        if softmax_scale is None:
            softmax_scale = qkv.shape[-1] ** (-0.5)
        out = torch.empty_like(qkv[:, 0])
        # BUG FIX: _flash_attn_forward returns FOUR values
        # (out, softmax_lse, rng_state, S_dmask); the original unpacked only
        # three, raising ValueError. The per-call rng_state is discarded here
        # because this class tracks RNG state explicitly via
        # rng_state0/rng_state1 above.
        _, softmax_lse0, _, S_dmask0 = _flash_attn_forward(
            qkv[:, 0],
            qkv[:, 1],
            qkv[:, 2],
            out,
            cu_seqlens[: batch_size0 + 1],
            cu_seqlens[: batch_size0 + 1],
            max_seqlen0,
            max_seqlen0,
            dropout_p,
            softmax_scale,
            causal=causal,
            return_softmax=return_softmax,
        )
        s = torch.cuda.Stream()
        with torch.cuda.stream(s):
            _, softmax_lse1, _, S_dmask1 = _flash_attn_forward(
                qkv[:, 0],
                qkv[:, 1],
                qkv[:, 2],
                out,
                cu_seqlens[batch_size0:],
                cu_seqlens[batch_size0:],
                max_seqlen1,
                max_seqlen1,
                dropout_p,
                softmax_scale,
                causal=causal,
                return_softmax=return_softmax,
                generator=generator1,
            )
        torch.cuda.current_stream().wait_stream(s)
        ctx.save_for_backward(
            qkv, out, softmax_lse0, softmax_lse1, cu_seqlens, rng_state0, rng_state1
        )
        ctx.dropout_p = dropout_p
        ctx.max_seqlen0 = max_seqlen0
        ctx.max_seqlen1 = max_seqlen1
        ctx.batch_size0 = batch_size0
        ctx.softmax_scale = softmax_scale
        ctx.causal = causal
        ctx.deterministic = deterministic
        if not return_softmax:
            return out
        else:
            # Pad both lse halves to a common query length and stack them.
            # (An unused `max_seqlen_k` computation over the S_dmask shapes
            # was removed.)
            max_seqlen_q = max(softmax_lse0.shape[2], softmax_lse1.shape[2])
            softmax_lse = torch.cat(
                [
                    F.pad(softmax_lse0, (0, max_seqlen_q - softmax_lse0.shape[2])),
                    F.pad(softmax_lse1, (0, max_seqlen_q - softmax_lse1.shape[2])),
                ],
                dim=0,
            )
            return out, softmax_lse, S_dmask0, S_dmask1

    @staticmethod
    def backward(ctx, dout, *args):
        qkv, out, softmax_lse0, softmax_lse1, cu_seqlens, rng_state0, rng_state1 = ctx.saved_tensors
        batch_size0 = ctx.batch_size0
        if rng_state0 is not None:
            # Restore the RNG state of the first forward launch so its dropout
            # mask is regenerated identically; the current state is restored
            # at the end.
            cur_rng_state = torch.cuda.get_rng_state()
            torch.cuda.set_rng_state(rng_state0)
        if rng_state1 is not None:
            generator1 = torch.Generator(device="cuda")
            generator1.set_state(rng_state1)
        else:
            generator1 = None
        dqkv = torch.empty_like(qkv)
        _flash_attn_backward(
            dout,
            qkv[:, 0],
            qkv[:, 1],
            qkv[:, 2],
            out,
            softmax_lse0,
            dqkv[:, 0],
            dqkv[:, 1],
            dqkv[:, 2],
            cu_seqlens[: batch_size0 + 1],
            cu_seqlens[: batch_size0 + 1],
            ctx.max_seqlen0,
            ctx.max_seqlen0,
            ctx.dropout_p,
            ctx.softmax_scale,
            ctx.causal,
            num_splits=1 if ctx.deterministic else 0,
        )
        s = torch.cuda.Stream()
        with torch.cuda.stream(s):
            _flash_attn_backward(
                dout,
                qkv[:, 0],
                qkv[:, 1],
                qkv[:, 2],
                out,
                softmax_lse1,
                dqkv[:, 0],
                dqkv[:, 1],
                dqkv[:, 2],
                cu_seqlens[batch_size0:],
                cu_seqlens[batch_size0:],
                ctx.max_seqlen1,
                ctx.max_seqlen1,
                ctx.dropout_p,
                ctx.softmax_scale,
                ctx.causal,
                generator=generator1,
                num_splits=1 if ctx.deterministic else 0,
            )
        torch.cuda.current_stream().wait_stream(s)
        if rng_state0 is not None:
            torch.cuda.set_rng_state(cur_rng_state)
        # One gradient slot per forward input; non-tensor inputs get None.
        return dqkv, None, None, None, None, None, None, None, None, None
def flash_attn_unpadded_qkvpacked_func(
    qkv,
    cu_seqlens,
    max_seqlen,
    dropout_p,
    softmax_scale=None,
    causal=False,
    return_attn_probs=False,
    deterministic=False,
):
    """Functional entry point for packed-QKV FlashAttention on unpadded
    (varlen) input; forwards every argument to the autograd Function."""
    return FlashAttnQKVPackedFunc.apply(
        qkv, cu_seqlens, max_seqlen, dropout_p,
        softmax_scale, causal, return_attn_probs, deterministic,
    )
def flash_attn_unpadded_kvpacked_func(
    q,
    kv,
    cu_seqlens_q,
    cu_seqlens_k,
    max_seqlen_q,
    max_seqlen_k,
    dropout_p,
    softmax_scale=None,
    causal=False,
    return_attn_probs=False,
    deterministic=False,
):
    """Functional entry point for FlashAttention with separate Q and packed
    KV tensors on unpadded (varlen) input; thin pass-through to the
    autograd Function."""
    return FlashAttnKVPackedFunc.apply(
        q, kv, cu_seqlens_q, cu_seqlens_k,
        max_seqlen_q, max_seqlen_k, dropout_p,
        softmax_scale, causal, return_attn_probs, deterministic,
    )
def flash_attn_unpadded_func(
    q,
    k,
    v,
    cu_seqlens_q,
    cu_seqlens_k,
    max_seqlen_q,
    max_seqlen_k,
    dropout_p,
    softmax_scale=None,
    causal=False,
    return_attn_probs=False,
    deterministic=False,
):
    """Functional entry point for FlashAttention with fully separate Q, K, V
    tensors on unpadded (varlen) input; thin pass-through to the
    autograd Function."""
    return FlashAttnFunc.apply(
        q, k, v, cu_seqlens_q, cu_seqlens_k,
        max_seqlen_q, max_seqlen_k, dropout_p,
        softmax_scale, causal, return_attn_probs, deterministic,
    )
def flash_attn_unpadded_qkvpacked_split_func(
    qkv,
    cu_seqlens,
    max_seqlen0,
    max_seqlen1,
    batch_size0,
    dropout_p,
    softmax_scale=None,
    causal=False,
    return_attn_probs=False,
    deterministic=False,
):
    """Functional entry point for the two-group split variant of packed-QKV
    FlashAttention (see FlashAttnQKVPackedSplitFunc); thin pass-through to
    the autograd Function."""
    return FlashAttnQKVPackedSplitFunc.apply(
        qkv, cu_seqlens, max_seqlen0, max_seqlen1, batch_size0,
        dropout_p, softmax_scale, causal, return_attn_probs, deterministic,
    )
def flash_attn_func(
    qkv, cu_seqlens, dropout_p, max_s, softmax_scale=None, causal=False, return_attn_probs=False
):
    """Legacy-signature convenience wrapper: accepts (dropout_p, max_s) in
    that order and forwards to flash_attn_unpadded_qkvpacked_func, which
    takes (max_seqlen, dropout_p)."""
    return flash_attn_unpadded_qkvpacked_func(
        qkv, cu_seqlens, max_s, dropout_p, softmax_scale, causal, return_attn_probs
    )
class FlashAttention(nn.Module):
    """Scaled-dot-product attention backed by the FlashAttention kernels.

    Accepts packed QKV input in one of three forms: already-unpadded (caller
    supplies ``cu_seqlens``/``max_s``), padded without a mask (treated as
    uniform-length), or padded with ``key_padding_mask`` (unpadded
    internally and re-padded on the way out).
    """

    def __init__(self, softmax_scale=None, attention_dropout=0.0):
        super().__init__()
        self.softmax_scale = softmax_scale
        self.dropout_p = attention_dropout

    def forward(
        self,
        qkv,
        key_padding_mask=None,
        causal=False,
        cu_seqlens=None,
        max_s=None,
        need_weights=False,
    ):
        assert not need_weights
        assert qkv.dtype in [torch.float16, torch.bfloat16]
        assert qkv.is_cuda
        # Dropout is only active in training mode.
        drop = self.dropout_p if self.training else 0.0
        if cu_seqlens is not None:
            # Caller already unpadded the input; run the kernel directly.
            assert max_s is not None
            result = flash_attn_unpadded_qkvpacked_func(
                qkv,
                cu_seqlens,
                max_s,
                drop,
                softmax_scale=self.softmax_scale,
                causal=causal,
            )
            return result, None
        batch_size, seqlen = qkv.shape[0], qkv.shape[1]
        if key_padding_mask is None:
            # All sequences share the same length: flatten batch and build
            # trivial cumulative offsets.
            flat = rearrange(qkv, "b s ... -> (b s) ...")
            offsets = torch.arange(
                0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32, device=qkv.device
            )
            result = flash_attn_unpadded_qkvpacked_func(
                flat,
                offsets,
                seqlen,
                drop,
                softmax_scale=self.softmax_scale,
                causal=causal,
            )
            result = rearrange(result, "(b s) ... -> b s ...", b=batch_size)
        else:
            # Remove padding tokens, attend, then scatter back into the
            # padded layout.
            nheads = qkv.shape[-2]
            flat = rearrange(qkv, "b s three h d -> b s (three h d)")
            unpadded, indices, offsets, max_s = unpad_input(flat, key_padding_mask)
            unpadded = rearrange(unpadded, "nnz (three h d) -> nnz three h d", three=3, h=nheads)
            attn_unpad = flash_attn_unpadded_qkvpacked_func(
                unpadded,
                offsets,
                max_s,
                drop,
                softmax_scale=self.softmax_scale,
                causal=causal,
            )
            result = rearrange(
                pad_input(
                    rearrange(attn_unpad, "nnz h d -> nnz (h d)"), indices, batch_size, seqlen
                ),
                "b s (h d) -> b s h d",
                h=nheads,
            )
        return result, None
class FlashMHA(nn.Module):
    """Drop-in multi-head attention module backed by FlashAttention.

    A single linear layer produces packed QKV, the fused kernel computes
    attention, and a final linear layer projects the result back to
    ``embed_dim``. Batch-first layout is required.
    """

    def __init__(
        self,
        embed_dim,
        num_heads,
        bias=True,
        batch_first=True,
        attention_dropout=0.0,
        causal=False,
        device=None,
        dtype=None,
    ) -> None:
        assert batch_first
        kw = {"device": device, "dtype": dtype}
        super().__init__()
        self.embed_dim = embed_dim
        self.causal = causal
        self.num_heads = num_heads
        assert embed_dim % num_heads == 0, "self.kdim must be divisible by num_heads"
        self.head_dim = embed_dim // num_heads
        # Kernel constraint on the per-head dimension.
        assert (
            self.head_dim <= 128 and self.head_dim % 8 == 0
        ), "Only support head_dim <= 128 and divisible by 8"
        self.Wqkv = nn.Linear(embed_dim, 3 * embed_dim, bias=bias, **kw)
        self.inner_attn = FlashAttention(attention_dropout=attention_dropout)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, **kw)

    def forward(self, x, key_padding_mask=None, need_weights=False):
        """Run attention over ``x`` of shape (batch, seq, embed_dim);
        returns (output, attn_weights) where attn_weights is always None."""
        packed = rearrange(
            self.Wqkv(x), "b s (three h d) -> b s three h d", three=3, h=self.num_heads
        )
        context, attn_weights = self.inner_attn(
            packed, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=self.causal
        )
        return self.out_proj(rearrange(context, "b s h d -> b s (h d)")), attn_weights
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,510
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/models/ibert_quant_modules.py
|
import decimal
import numpy as np
import torch
from torch import nn
from torch.autograd import Function
from ...utils import logging
logger = logging.get_logger(__name__)
class QuantEmbedding(qc.Module):
    """Embedding layer with symmetric weight quantization.

    In float mode (quant_mode=False) it behaves like nn.Embedding; in quant
    mode it quantizes the weight to `weight_bit` bits on every forward pass
    and returns the dequantized embedding together with its scaling factor.
    NOTE(review): `F` (torch.nn.functional) is not among this file's visible
    imports — confirm it is in scope.
    """

    def __init__(
        self,
        num_embeddings,
        embedding_dim,
        padding_idx=None,
        max_norm=None,
        norm_type=2.0,
        scale_grad_by_freq=False,
        sparse=False,
        _weight=None,
        weight_bit=8,
        momentum=0.95,
        quant_mode=False,
    ):
        super().__init__()
        self.num_ = num_embeddings
        self.dim = embedding_dim
        self.padding_idx = padding_idx
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        self.sparse = sparse
        self.weight = nn.Parameter(torch.zeros([num_embeddings, embedding_dim]))
        # Buffers so the quantized state travels with state_dict / .to(device).
        self.register_buffer("weight_scaling_factor", torch.zeros(1))
        self.register_buffer("weight_integer", torch.zeros_like(self.weight))
        self.weight_bit = weight_bit
        self.momentum = momentum
        self.quant_mode = quant_mode
        self.percentile_mode = False
        # Straight-through symmetric quantizer (autograd Function).
        self.weight_function = SymmetricQuantFunction.apply

    def forward(self, x, positions=None, incremental_state=None):
        """Look up embeddings for index tensor `x`.

        Returns (embeddings, scaling_factor); scaling_factor is None in
        float mode. `positions` and `incremental_state` are accepted for
        interface compatibility but unused here.
        """
        if not self.quant_mode:
            # Float path: plain embedding lookup, no scaling factor.
            return (
                F.embedding(
                    x,
                    self.weight,
                    self.padding_idx,
                    self.max_norm,
                    self.norm_type,
                    self.scale_grad_by_freq,
                    self.sparse,
                ),
                None,
            )
        # Quant path: derive a fresh symmetric range from the current weights
        # (detached so range computation does not affect gradients).
        w = self.weight
        w_transform = w.data.detach()
        w_min = w_transform.min().expand(1)
        w_max = w_transform.max().expand(1)
        self.weight_scaling_factor = symmetric_linear_quantization_params(
            self.weight_bit, w_min, w_max, False
        )
        self.weight_integer = self.weight_function(
            self.weight, self.weight_bit, self.percentile_mode, self.weight_scaling_factor
        )
        # Look up integer embeddings, then dequantize by the scaling factor.
        emb_int = F.embedding(
            x,
            self.weight_integer,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )
        return emb_int * self.weight_scaling_factor, self.weight_scaling_factor
class QuantAct(qc.Module):
    """Activation quantizer with an EMA-tracked symmetric range.

    During training it updates running `x_min`/`x_max` statistics of the
    (optionally residual-added) activation; in quant mode it quantizes the
    activation to `activation_bit` bits using those statistics and returns
    the dequantized value with its scaling factor.
    """

    def __init__(
        self,
        activation_bit,
        act_range_momentum=0.95,
        per_channel=False,
        channel_len=None,
        quant_mode=False,
    ):
        super().__init__()
        self.activation_bit = activation_bit
        self.act_range_momentum = act_range_momentum
        self.quant_mode = quant_mode
        self.per_channel = per_channel
        self.percentile = False
        # Straight-through symmetric quantizer (autograd Function).
        self.act_function = SymmetricQuantFunction.apply
        if not self.per_channel:
            self.register_buffer("x_min", torch.zeros(1))
            self.register_buffer("x_max", torch.zeros(1))
            self.register_buffer("act_scaling_factor", torch.zeros(1))
            # Nudge the range slightly off zero so forward() can detect the
            # "uninitialized" state via the |x| < 1.1e-5 check.
            self.x_min -= 1e-5
            self.x_max += 1e-5
        else:
            raise NotImplementedError("per-channel mode is not currently supported for activation.")

    def __repr__(self):
        # Bug fix: this previously printed self.activation_bit twice; the
        # field labelled quant_mode now reports the actual quant_mode flag.
        return (
            f"{self.__class__.__name__}(activation_bit={self.activation_bit}, "
            f"quant_mode: {self.quant_mode}, Act_min: {self.x_min.item():.2f}, "
            f"Act_max: {self.x_max.item():.2f})"
        )

    def forward(
        self,
        x,
        pre_act_scaling_factor=None,
        identity=None,
        identity_scaling_factor=None,
        specified_min=None,
        specified_max=None,
    ):
        """Quantize activation `x` (optionally added to a residual `identity`).

        Returns (quantized_activation, scaling_factor); scaling_factor is
        None in float mode. `specified_min`/`specified_max` override the
        tracked range when given.
        """
        x_act = x if identity is None else identity + x
        # collect running stats if training
        if self.training:
            assert not self.percentile, "percentile mode is not currently supported for activation."
            assert (
                not self.per_channel
            ), "per-channel mode is not currently supported for activation."
            x_min = x_act.data.min()
            x_max = x_act.data.max()
            assert (
                x_max.isnan().sum() == 0 and x_min.isnan().sum() == 0
            ), "NaN detected when computing min/max of the activation"
            # Initialization: first batch seeds the range directly.
            if self.x_min.min() > -1.1e-5 and self.x_max.max() < 1.1e-5:
                self.x_min = self.x_min + x_min
                self.x_max = self.x_max + x_max
            # exponential moving average (EMA)
            # use momentum to prevent the quantized values change greatly every iteration
            elif self.act_range_momentum == -1:
                # momentum == -1 means "track the running extrema" instead.
                self.x_min = torch.min(self.x_min, x_min)
                self.x_max = torch.max(self.x_max, x_max)
            else:
                self.x_min = self.x_min * self.act_range_momentum + x_min * (
                    1 - self.act_range_momentum
                )
                self.x_max = self.x_max * self.act_range_momentum + x_max * (
                    1 - self.act_range_momentum
                )
        if not self.quant_mode:
            return x_act, None
        x_min = self.x_min if specified_min is None else specified_min
        x_max = self.x_max if specified_max is None else specified_max
        self.act_scaling_factor = symmetric_linear_quantization_params(
            self.activation_bit, x_min, x_max, per_channel=self.per_channel
        )
        if pre_act_scaling_factor is None:
            # this is for the input quantization
            quant_act_int = self.act_function(
                x, self.activation_bit, self.percentile, self.act_scaling_factor
            )
        else:
            # Requantize an already-quantized input (and optional residual)
            # with integer-only fixed-point arithmetic.
            quant_act_int = FixedPointMul.apply(
                x,
                pre_act_scaling_factor,
                self.activation_bit,
                self.act_scaling_factor,
                identity,
                identity_scaling_factor,
            )
        correct_output_scale = self.act_scaling_factor.view(-1)
        return quant_act_int * correct_output_scale, self.act_scaling_factor
class QuantLinear(qc.Module):
    """Linear layer with symmetric weight (and bias) quantization.

    In float mode it behaves like nn.Linear; in quant mode it quantizes the
    weight to `weight_bit` bits (per-tensor or per-channel) and the bias to
    `bias_bit` bits, computes the matmul on integer values, and returns the
    dequantized output with its scaling factor.
    """

    def __init__(
        self,
        in_features,
        out_features,
        bias=True,
        weight_bit=8,
        bias_bit=32,
        per_channel=False,
        quant_mode=False,
    ):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.zeros([out_features, in_features]))
        self.register_buffer("weight_integer", torch.zeros_like(self.weight))
        self.register_buffer("fc_scaling_factor", torch.zeros(self.out_features))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_features))
            self.register_buffer("bias_integer", torch.zeros_like(self.bias))
        else:
            # Bug fix: forward() reads self.bias and self.bias_integer
            # unconditionally; previously bias=False raised AttributeError.
            self.bias = None
            self.bias_integer = None
        self.weight_bit = weight_bit
        self.quant_mode = quant_mode  # (was assigned twice; once is enough)
        self.per_channel = per_channel
        self.bias_bit = bias_bit
        self.percentile_mode = False
        # Straight-through symmetric quantizer (autograd Function).
        self.weight_function = SymmetricQuantFunction.apply

    def __repr__(self):
        s = super().__repr__()
        s = f"({s} weight_bit={self.weight_bit}, quant_mode={self.quant_mode})"
        return s

    def forward(self, x, prev_act_scaling_factor=None):
        """Apply the (quantized) linear transform to `x`.

        Returns (output, scaling_factor); scaling_factor is None in float
        mode. In quant mode `prev_act_scaling_factor` must be the scalar
        scaling factor of the incoming activation.
        """
        if not self.quant_mode:
            return F.linear(x, weight=self.weight, bias=self.bias), None
        # assert that prev_act_scaling_factor is a scalar tensor
        assert prev_act_scaling_factor is not None and prev_act_scaling_factor.shape == (1,), (
            "Input activation to the QuantLinear layer should be globally (non-channel-wise) quantized. "
            "Please add a QuantAct layer with `per_channel = True` before this QuantAct layer"
        )
        # Derive the weight quantization range (detached: range computation
        # must not affect gradients).
        w = self.weight
        w_transform = w.data.detach()
        if self.per_channel:
            w_min, _ = torch.min(w_transform, dim=1, out=None)
            w_max, _ = torch.max(w_transform, dim=1, out=None)
        else:
            w_min = w_transform.min().expand(1)
            w_max = w_transform.max().expand(1)
        self.fc_scaling_factor = symmetric_linear_quantization_params(
            self.weight_bit, w_min, w_max, self.per_channel
        )
        self.weight_integer = self.weight_function(
            self.weight, self.weight_bit, self.percentile_mode, self.fc_scaling_factor
        )
        # Bias shares the combined scale so the integer matmul stays exact.
        bias_scaling_factor = self.fc_scaling_factor * prev_act_scaling_factor
        if self.bias is not None:
            self.bias_integer = self.weight_function(
                self.bias, self.bias_bit, False, bias_scaling_factor
            )
        prev_act_scaling_factor = prev_act_scaling_factor.view(1, -1)
        # Recover the integer activation, run the integer linear op, then
        # dequantize with the combined scaling factor.
        x_int = x / prev_act_scaling_factor
        return (
            F.linear(x_int, weight=self.weight_integer, bias=self.bias_integer)
            * bias_scaling_factor,
            bias_scaling_factor,
        )
class IntGELU(qc.Module):
    """Integer-only GELU (I-BERT style): erf is approximated by a clipped
    second-order polynomial evaluated on integer inputs. Falls back to the
    standard nn.GELU when quantization is disabled."""

    def __init__(self, quant_mode=True, force_dequant="none"):
        super().__init__()
        if force_dequant in ("nonlinear", "gelu"):
            logger.info("Force dequantize gelu")
            self.quant_mode = False
        else:
            self.quant_mode = quant_mode
        if not self.quant_mode:
            self.activation_fn = nn.GELU()
        self.k = 1.4142
        self.const = 14  # dummy integer constant
        self.coeff = [-0.2888, -1.769, 1]  # a(x+b)**2 + c
        self.coeff[2] /= self.coeff[0]

    def int_erf(self, x_int, scaling_factor):
        """Polynomial integer approximation of erf; returns (y_int, out_scale)."""
        b_q = torch.floor(self.coeff[1] / scaling_factor)
        c_q = torch.floor(self.coeff[2] / scaling_factor**2)
        signs = torch.sign(x_int)
        clipped = torch.min(torch.abs(x_int), -b_q)
        y_q = signs * ((clipped + b_q) ** 2 + c_q)
        out_scale = scaling_factor**2 * self.coeff[0]
        # avoid overflow
        y_q = floor_ste.apply(y_q / 2**self.const)
        out_scale = out_scale * 2**self.const
        return y_q, out_scale

    def forward(self, x, scaling_factor=None):
        if not self.quant_mode:
            return self.activation_fn(x), None
        x_int = x / scaling_factor
        erf_int, erf_scale = self.int_erf(x_int, scaling_factor / self.k)
        shift_int = 1.0 // erf_scale
        x_int = x_int * (erf_int + shift_int)
        out_scale = scaling_factor * erf_scale / 2
        return x_int * out_scale, out_scale
class IntSoftmax(qc.Module):
    """Integer-only softmax (I-BERT style): exp is approximated on integer
    inputs by range reduction with powers of two plus a 2nd-order polynomial,
    then normalized in integer arithmetic. Falls back to F.softmax when
    quant_mode is False."""
    def __init__(self, output_bit, quant_mode=False, force_dequant="none"):
        super().__init__()
        self.output_bit = output_bit  # bit width of the returned probabilities
        self.max_bit = 32  # accumulator width for the normalization factor
        self.quant_mode = quant_mode
        if force_dequant in ["nonlinear", "softmax"]:
            logger.info("Force dequantize softmax")
            self.quant_mode = False
        # requantize the approximated exp values to 16 bits before summing
        self.act = QuantAct(16, quant_mode=self.quant_mode)
        self.x0 = -0.6931  # -ln2
        self.const = 30  # dummy integer constant
        self.coef = [0.35815147, 0.96963238, 1.0]  # ax**2 + bx + c
        # fold the leading coefficient into b and c so the polynomial can be
        # evaluated as (x + b')x + c' and rescaled by `a` afterwards
        self.coef[1] /= self.coef[0]
        self.coef[2] /= self.coef[0]
    def int_polynomial(self, x_int, scaling_factor):
        # 2nd-order integer polynomial approximating exp on [-ln2, 0]
        with torch.no_grad():
            b_int = torch.floor(self.coef[1] / scaling_factor)
            c_int = torch.floor(self.coef[2] / scaling_factor**2)
            z = (x_int + b_int) * x_int + c_int
            scaling_factor = self.coef[0] * scaling_factor**2
            return z, scaling_factor
    def int_exp(self, x_int, scaling_factor):
        # range reduction: x = r + q*x0 with x0 = -ln2, so exp(x) = 2**(-q) * poly(r)
        with torch.no_grad():
            x0_int = torch.floor(self.x0 / scaling_factor)
            x_int = torch.max(x_int, self.const * x0_int)  # clamp very negative inputs
            q = floor_ste.apply(x_int / x0_int)
            r = x_int - x0_int * q
            exp_int, exp_scaling_factor = self.int_polynomial(r, scaling_factor)
            exp_int = torch.clamp(floor_ste.apply(exp_int * 2 ** (self.const - q)), min=0)
            scaling_factor = exp_scaling_factor / 2**self.const
            return exp_int, scaling_factor
    def forward(self, x, scaling_factor):
        # float fallback
        if not self.quant_mode:
            return F.softmax(x, dim=-1), None
        x_int = x / scaling_factor
        # subtract the per-row max for numerical stability
        x_int_max, _ = x_int.max(dim=-1, keepdim=True)
        x_int = x_int - x_int_max
        exp_int, exp_scaling_factor = self.int_exp(x_int, scaling_factor)
        # Avoid overflow
        exp, exp_scaling_factor = self.act(exp_int, exp_scaling_factor)
        exp_int = exp / exp_scaling_factor
        exp_int_sum = exp_int.sum(dim=-1, keepdim=True)
        # normalization factor expressed in max_bit-wide integer arithmetic
        factor = floor_ste.apply(2**self.max_bit / exp_int_sum)
        exp_int = floor_ste.apply(exp_int * factor / 2 ** (self.max_bit - self.output_bit))
        scaling_factor = 1 / 2**self.output_bit
        return exp_int * scaling_factor, scaling_factor
class IntLayerNorm(qc.Module):
    """Integer-only LayerNorm (I-BERT style): the variance is accumulated in
    integer arithmetic with a dynamically adjusted right-shift so the sum of
    squares stays below 2**max_bit."""
    def __init__(self, normalized_shape, eps, output_bit=8, quant_mode=False, force_dequant="none"):
        super().__init__()
        self.normalized_shape = normalized_shape
        self.eps = eps
        self.weight = nn.Parameter(torch.zeros(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.quant_mode = quant_mode
        if force_dequant in ["nonlinear", "layernorm"]:
            logger.info("Force dequantize layernorm")
            self.quant_mode = False
        # right-shift applied to centered values before squaring; grown on overflow
        self.register_buffer("shift", torch.zeros(1))
        self.output_bit = output_bit
        self.max_bit = 32
        self.dim_sqrt = None  # sqrt(feature dim), computed lazily on first quantized call
        self.activation = QuantAct(self.output_bit, quant_mode=self.quant_mode)
    def set_shift(self, y_int):
        # grow `self.shift` (monotonically) so the integer variance fits below 2**max_bit
        with torch.no_grad():
            y_sq_int = y_int**2
            var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
            shift = (torch.log2(torch.sqrt(var_int / 2**self.max_bit)).ceil()).max()
            shift_old = self.shift
            self.shift = torch.max(self.shift, shift)
            logger.info(f"Dynamic shift adjustment: {int(shift_old)} to {int(self.shift)}")
    def overflow_fallback(self, y_int):
        # training-time recovery path: enlarge the shift, then recompute variance
        self.set_shift(y_int)  # adjusts `self.shift`
        y_int_shifted = floor_ste.apply(y_int / 2**self.shift)
        y_sq_int = y_int_shifted**2
        var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
        return var_int
    def forward(self, x, scaling_factor=None):
        # float fallback: plain LayerNorm over axis 2
        if not self.quant_mode:
            mean = x.mean(axis=2, keepdim=True)
            y = x - mean
            var = torch.mean(y**2, axis=2, keepdim=True)
            x = y / torch.sqrt(self.eps + var)
            x = x * self.weight + self.bias
            return x, None
        # compute sqrt of the feature dimension if it is the first run
        if self.dim_sqrt is None:
            n = torch.tensor(x.shape[2], dtype=torch.float)
            self.dim_sqrt = torch.sqrt(n).to(x.device)
        # Normalization: computes mean and variance(std)
        x_int = x / scaling_factor
        mean_int = round_ste.apply(x_int.mean(axis=2, keepdim=True))
        y_int = x_int - mean_int
        y_int_shifted = floor_ste.apply(y_int / 2**self.shift)
        y_sq_int = y_int_shifted**2
        var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
        # overflow handling in training time
        if self.training:
            # if overflow is detected
            if var_int.max() >= 2**self.max_bit:
                var_int = self.overflow_fallback(y_int)
                assert var_int.max() < 2**self.max_bit + 0.1, (
                    "Error detected in overflow handling: "
                    "`var_int` exceeds `self.max_bit` (the maximum possible bit width)"
                )
        # To be replaced with integer-sqrt kernel that produces the same output
        std_int = floor_ste.apply(torch.sqrt(var_int)) * 2**self.shift
        factor = floor_ste.apply(2**31 / std_int)
        y_int = floor_ste.apply(y_int * factor / 2)
        scaling_factor = self.dim_sqrt / 2**30
        # scaling and shifting: fold the affine bias into integer form
        bias = self.bias.data.detach() / (self.weight.data.detach())
        bias_int = floor_ste.apply(bias / scaling_factor)
        y_int = y_int + bias_int
        scaling_factor = scaling_factor * self.weight
        x = y_int * scaling_factor
        return x, scaling_factor
def get_percentile_min_max(input, lower_percentile, upper_percentile, output_tensor=False):
    """Return (lower, upper) percentile bounds of a 1-D tensor.

    Percentiles are given in percent (e.g. 99.9). When `output_tensor` is
    False the bounds are returned as Python floats.
    """
    n = input.shape[0]
    k_low = round(n * (1 - lower_percentile * 0.01))
    k_high = round(n * upper_percentile * 0.01)
    upper_bound = torch.kthvalue(input, k=k_high).values
    if lower_percentile == 0:
        # percentile 0 -> a zero with matching dtype/device
        lower_bound = upper_bound * 0
    else:
        # k-th smallest of the negated values gives the lower percentile
        lower_bound = -torch.kthvalue(-input, k=k_low).values
    if output_tensor:
        return lower_bound, upper_bound
    return lower_bound.item(), upper_bound.item()
def linear_quantize(input, scale, zero_point, inplace=False):
    """Quantize `input` as round(input / scale + zero_point).

    `scale`/`zero_point` are reshaped per the input rank: 4-D (conv weights),
    2-D (linear weights), otherwise flat. With `inplace=True` the input tensor
    is overwritten and returned.
    """
    rank = len(input.shape)
    if rank == 4:
        view_shape = (-1, 1, 1, 1)
    elif rank == 2:
        # reshape scale and zeropoint for linear weights
        view_shape = (-1, 1)
    else:
        view_shape = (-1,)
    scale = scale.view(view_shape)
    zero_point = zero_point.view(view_shape)
    # quantized = float / scale + zero_point
    if inplace:
        input.mul_(1.0 / scale).add_(zero_point).round_()
        return input
    return torch.round(1.0 / scale * input + zero_point)
def symmetric_linear_quantization_params(
    num_bits, saturation_min, saturation_max, per_channel=False
):
    """Compute the symmetric-quantization scale for the given saturation range.

    The scale is max(|min|, |max|) / (2**(num_bits-1) - 1), clamped away from
    zero; computed per channel when `per_channel` is True.
    """
    with torch.no_grad():
        n = 2 ** (num_bits - 1) - 1
        if per_channel:
            sat, _ = torch.max(
                torch.stack([saturation_min.abs(), saturation_max.abs()], dim=1), dim=1
            )
        else:
            # scalar (single-element) tensors: builtin max picks the larger bound
            sat = max(saturation_min.abs(), saturation_max.abs())
        return torch.clamp(sat, min=1e-8) / n
class SymmetricQuantFunction(Function):
    """Symmetric linear quantizer with a straight-through estimator:
    forward quantizes to k bits, backward passes the gradient scaled by 1/scale."""

    @staticmethod
    def forward(ctx, x, k, percentile_mode, scale):
        zero_point = torch.tensor(0.0).to(scale.device)
        n = 2 ** (k - 1) - 1
        quantized = linear_quantize(x, scale, zero_point, inplace=False)
        ctx.scale = scale
        return torch.clamp(quantized, -n, n - 1)

    @staticmethod
    def backward(ctx, grad_output):
        scale = ctx.scale
        rank = len(grad_output.shape)
        if rank == 4:
            scale = scale.view(-1, 1, 1, 1)
        elif rank == 2:
            # reshape scale and zeropoint for linear weights
            scale = scale.view(-1, 1)
        else:
            scale = scale.view(-1)
        return grad_output.clone() / scale, None, None, None, None
class floor_ste(Function):
    """Floor with a straight-through estimator: the gradient is passed through unchanged."""

    @staticmethod
    def forward(ctx, x):
        return x.floor()

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output.clone()
class round_ste(Function):
    """Round-to-nearest with a straight-through estimator gradient."""

    @staticmethod
    def forward(ctx, x):
        return x.round()

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output.clone()
def batch_frexp(inputs, max_bit=31):
    """Decompose each element into a dyadic pair so that
    element ~= mantissa_int / 2**exponent, with mantissa_int a max_bit-wide integer.

    Returns (mantissa_tensor, exponent_tensor), each shaped like `inputs`.
    """
    orig_shape = inputs.size()
    # trans the input to be a 1-d tensor
    flat = inputs.view(-1)
    mantissas, exponents = np.frexp(flat.cpu().numpy())
    # shift each mantissa into a max_bit-wide integer with exact half-up rounding
    quantized = np.array(
        [
            int(
                decimal.Decimal(m * (2**max_bit)).quantize(
                    decimal.Decimal("1"), rounding=decimal.ROUND_HALF_UP
                )
            )
            for m in mantissas
        ]
    )
    exponents = float(max_bit) - exponents
    return (
        torch.from_numpy(quantized).to(inputs.device).view(orig_shape),
        torch.from_numpy(exponents).to(inputs.device).view(orig_shape),
    )
class FixedPointMul(Function):
    """Fixed-point requantization: maps integer activations from one scaling
    factor to another (optionally fusing an identity/residual branch), using a
    dyadic multiplier m / 2**e obtained from batch_frexp. The backward pass is
    a straight-through gradient divided by the output scaling factor."""
    @staticmethod
    def forward(
        ctx,
        pre_act,
        pre_act_scaling_factor,
        bit_num,
        z_scaling_factor,
        identity=None,
        identity_scaling_factor=None,
    ):
        # broadcast scaling factors over the last dim unless they are already 3-D
        if len(pre_act_scaling_factor.shape) == 3:
            reshape = lambda x: x  # noqa: E731
        else:
            reshape = lambda x: x.view(1, 1, -1)  # noqa: E731
        ctx.identity = identity
        n = 2 ** (bit_num - 1) - 1
        with torch.no_grad():
            pre_act_scaling_factor = reshape(pre_act_scaling_factor)
            if identity is not None:
                identity_scaling_factor = reshape(identity_scaling_factor)
            ctx.z_scaling_factor = z_scaling_factor
            z_int = torch.round(pre_act / pre_act_scaling_factor)
            _A = pre_act_scaling_factor.type(torch.double)
            _B = (z_scaling_factor.type(torch.float)).type(torch.double)
            new_scale = _A / _B
            new_scale = reshape(new_scale)
            # dyadic decomposition: new_scale ~= m / 2**e (exact integer arithmetic)
            m, e = batch_frexp(new_scale)
            output = z_int.type(torch.double) * m.type(torch.double)
            output = torch.round(output / (2.0**e))
            if identity is not None:
                # needs addition of identity activation
                wx_int = torch.round(identity / identity_scaling_factor)
                _A = identity_scaling_factor.type(torch.double)
                _B = (z_scaling_factor.type(torch.float)).type(torch.double)
                new_scale = _A / _B
                new_scale = reshape(new_scale)
                m1, e1 = batch_frexp(new_scale)
                output1 = wx_int.type(torch.double) * m1.type(torch.double)
                output1 = torch.round(output1 / (2.0**e1))
                output = output1 + output
            # saturate to the signed bit_num range
            return torch.clamp(output.type(torch.float), -n - 1, n)
    @staticmethod
    def backward(ctx, grad_output):
        # straight-through gradient, rescaled by the output scaling factor
        identity_grad = None
        if ctx.identity is not None:
            identity_grad = grad_output.clone() / ctx.z_scaling_factor
        return (
            grad_output.clone() / ctx.z_scaling_factor,
            None,
            None,
            None,
            None,
            identity_grad,
            None,
        )
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,511
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/base/doc/section.py
|
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from .realm import Agent
from .part import Setting
from .meta import with_property
from .message import StoryPost, BlogPost
from .message import Note, Message, Chain, Letter, Doc
class Section:
    """A filtered, grouped view over a collection of document parts.

    The "extent" is the subset of parts whose type matches `incl` (and not
    `excl`) and that have no parent.  From the extent, `setup` derives three
    cached groupings: `groups`, `subgroups` (per group) and `parts`
    (per subgroup).  Subclasses specialize via the class tuples below.
    """

    # Part classes accepted into this section's extent.
    incl = ()
    # Part classes rejected even when they also match `incl`.
    excl = ()
    # Group slugs whose parts are marked hidden instead of listed.
    hide = ()
    # Class-level fallbacks; instances shadow these via their __dict__,
    # and popping the instance entry restores these defaults.
    _extent = ()
    _groups = _subgroups = _parts = None

    def __init__(self, extent, **kw):
        super().__init__(**kw)
        self.extent = extent

    @property
    def extent(self):
        return self._extent

    @extent.setter
    def extent(self, ps):
        if ps:
            # Keep only top-level parts (no parent) of included types.
            e = []
            for p in ps:
                c = type(p)
                if issubclass(c, self.incl) and not issubclass(c, self.excl):
                    if not p.parent:
                        e.append(p)
            self._extent = sorted(e)
        else:
            # Remove the instance override so the class-level () shows through.
            self.__dict__.pop('_extent', None)
        # Invalidate the cached groupings whenever the extent changes;
        # they are lazily rebuilt by setup() on next access.
        self.__dict__.pop('_groups', None)
        self.__dict__.pop('_subgroups', None)
        self.__dict__.pop('_parts', None)

    @property
    def groups(self):
        if self._groups is None:
            self.setup()
        return self._groups

    @property
    def subgroups(self):
        if self._subgroups is None:
            self.setup()
        return self._subgroups

    @property
    def parts(self):
        if self._parts is None:
            self.setup()
        return self._parts

    def setup(self):
        """Build the group/subgroup/part indices from the current extent.

        NOTE(review): parts whose `subgroup` is falsy are skipped entirely
        and never appear in `parts` — confirm this is intended upstream.
        """
        gs = []      # groups in first-seen order (sorted at the end)
        sg_g = {}    # group -> list of its subgroups
        ps_s = {}    # subgroup -> list of its parts
        ps = set()   # groups/subgroups already registered
        for p in self.extent:
            s = p.subgroup
            if s:
                g = s.group
                if g and g.slug in self.hide:
                    # Hidden group: flag the part and drop it from the view.
                    p.hide = True
                    continue
                if g and g not in ps:
                    gs.append(g)
                    ps.add(g)
                if s not in ps:
                    # Subgroups of ungrouped parts collect under key None.
                    sg_g.setdefault(g, []).append(s)
                    ps.add(s)
                ps_s.setdefault(s, []).append(p)
        self._groups = sorted(gs)
        for g, ss in sg_g.items():
            sg_g[g] = sorted(ss)
        # With no groups at all, collapse to the flat None-keyed entries.
        self._subgroups = sg_g if self._groups else sg_g.get(None, ())
        # for s, ps in ps_s.items():
        #     ps_s[s] = sorted(ps)
        self._parts = ps_s if self._subgroups else ps_s.get(None, ())
class Story(Section):
    """Section holding story posts; 'about'/'blurbs' groups are hidden."""
    incl = (StoryPost, )
    hide = ('about', 'blurbs')
class Blog(Section):
    """Section holding blog posts only."""
    incl = (BlogPost, )
class Agents(Section):
    """Section holding agent parts only."""
    incl = (Agent, )
class Docs(Section):
    """Section holding document-like parts, excluding story/blog posts."""
    incl = (Note, Message, Chain, Letter, Doc)
    excl = (StoryPost, BlogPost)

    def update(self, settings):
        # Stub: settings-driven refresh is not implemented yet.
        pass
@with_property('settings', Setting.creator)
class Session:
    """Per-client view over an app's sections.

    The `with_property` decorator injects a `settings` property backed by
    `Setting.creator` — presumably converting the raw `settings` iterable
    assigned in __init__; confirm against the decorator's implementation.
    """

    def __init__(self, app, settings=(), **kw):
        super().__init__(**kw)
        # Borrow the app-wide sections; only `docs` is session-specific.
        self.parts_all = app.parts_all
        self.story = app.story
        self.blog = app.blog
        self.agents = app.agents
        self.docs = Docs(app.parts_flat)
        self.settings = settings

    def update(self):
        """Propagate the current settings into the docs section."""
        self.docs.update(self.settings)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,512
|
quantapix/qnarre
|
refs/heads/main
|
/notebooks/old/src/trackable.py
|
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# !pip install -U tf-nightly-2.0-preview
import tensorflow as tf
from datetime import datetime
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import tracking
def trackable(tr1, v):
    """Restore tr1 from the latest checkpoint (if any), bump v, and save."""
    ckpt = tf.train.Checkpoint(tr1=tr1)
    mgr = tf.train.CheckpointManager(ckpt, '/tmp/q/trackable', max_to_keep=2)
    latest = mgr.latest_checkpoint
    ckpt.restore(latest).expect_partial()
    if not latest:
        print('start from scratch')
    else:
        print(f'restored from: {latest}')
        print(f'others are: {mgr.checkpoints}')
    print(f'value before: {v.numpy()}')
    v.assign_add(1)
    mgr.save()
def autotrackable(tr2, tracked, untracked):
    """Round-trip tr2 through a checkpoint; only tracked state is restored."""
    ckpt = tf.train.Checkpoint(tr2=tr2)
    mgr = tf.train.CheckpointManager(ckpt, '/tmp/q/trackable', max_to_keep=2)
    latest = mgr.latest_checkpoint
    ckpt.restore(latest).expect_partial()
    if latest:
        print(f'restored from: {latest}')
    print(f'values before: {tracked.numpy()}, {untracked.numpy()}')
    tracked.assign_add(1000)
    mgr.save()
    print(f'value as saved: {tracked.numpy()}')
def listing():
    """Inspect the latest checkpoint: names, shapes, one value, dtypes."""
    ckpt = tf.train.Checkpoint()
    mgr = tf.train.CheckpointManager(ckpt, '/tmp/q/trackable', max_to_keep=2)
    latest = mgr.latest_checkpoint
    vs = tf.train.list_variables(latest)
    print(f'names and shapes list: {vs}')
    # Load the last listed variable by name as a sanity check.
    n, _ = vs[-1]
    v = tf.train.load_variable(latest, n)
    print(f'loaded value: {v} for name: {n}')
    reader = tf.train.load_checkpoint(latest)
    ts = reader.get_variable_to_dtype_map()
    ss = reader.get_variable_to_shape_map()
    print(f'checkpoint types: {ts} and shapes: {ss}')
def deleting(tr2):
    """Show that deleting a tracked attribute removes it from new saves."""
    ckpt = tf.train.Checkpoint(tr2=tr2)
    mgr = tf.train.CheckpointManager(ckpt, '/tmp/q/trackable', max_to_keep=2)
    ckpt.restore(mgr.latest_checkpoint)
    # Attach a throwaway variable, save, and list it.
    ckpt.tr2.deleted = tf.Variable(-1)
    mgr.save()
    vs = tf.train.list_variables(mgr.latest_checkpoint)
    print(f'list deleted: {vs}')
    # Remove it and save again: it no longer appears in the listing.
    del ckpt.tr2.deleted
    mgr.save()
    vs = tf.train.list_variables(mgr.latest_checkpoint)
    print(f'deleted IS DELETED: {vs}')
def containers(tr3):
    """Save tr3 and list what its tracked containers contribute."""
    ckpt = tf.train.Checkpoint(tr3=tr3)
    mgr = tf.train.CheckpointManager(ckpt, '/tmp/q/trackable', max_to_keep=2)
    mgr.save()
    listed = tf.train.list_variables(mgr.latest_checkpoint)
    print(f'containers: {listed}')
def sharing(tr3):
    """Demonstrate that variables reachable via two paths are stored once."""
    ckpt = tf.train.Checkpoint(tr3=tr3)
    mgr = tf.train.CheckpointManager(ckpt, '/tmp/q/trackable', max_to_keep=2)
    ckpt.restore(mgr.latest_checkpoint).assert_consumed()
    # The list and dict share br1/br2; br3 lives only in the dict.
    v1 = tr3.br_list[0].v
    v2 = tr3.br_list[1].v
    vd1 = tr3.br_dict['br1'].v
    vd2 = tr3.br_dict['br2'].v
    vd3 = tr3.br_dict['br3'].v
    print(f'all fives: {v1.numpy()}, {v2.numpy()}, {vd3.numpy()}')
    print(f'shared too: {vd1.numpy()}, {vd2.numpy()}')
    for each in (v1, v2, vd3):
        each.assign_add(5)
    mgr.save()
    listed = tf.train.list_variables(mgr.latest_checkpoint)
    print(f'shared not repeated: {listed}')
    for each in (v1, v2, vd3):
        each.assign_add(-10)
    print(f'all zeros: {v1.numpy()}, {v2.numpy()}, {vd3.numpy()}')
    print(f'shared too: {vd1.numpy()}, {vd2.numpy()}')
    # A fresh Checkpoint over the same objects restores the saved tens.
    ckpt2 = tf.train.Checkpoint(tr3=tr3)
    mgr = tf.train.CheckpointManager(ckpt2, '/tmp/q/trackable', max_to_keep=2)
    ckpt2.restore(mgr.latest_checkpoint).assert_consumed()
    print(f'all tens: {v1.numpy()}, {v2.numpy()}, {vd3.numpy()}')
    print(f'shared too: {vd1.numpy()}, {vd2.numpy()}')
class Module(tf.Module):
    """Toy tf.Module chain: each call adds its variable to the sub-result."""

    # Optional nested Module; None marks the leaf of the chain.
    sub = None

    def __init__(self, name=None):
        super().__init__(name=name)
        with self.name_scope:
            self.v = tf.Variable(1, name='m_v')

    def __str__(self):
        text = f'n: {self.name}, v: {self.v.numpy()}'
        return text + (f', s: ({self.sub})' if self.sub else '')

    @tf.Module.with_name_scope
    def __call__(self):
        # Leaf contributes a constant 100; inner nodes recurse first.
        base = tf.constant(100) if self.sub is None else self.sub()
        total = tf.math.add(base, self.v)
        self.v.assign(total)
        return total
def modules(mod):
    """Run a Module twice around a checkpoint save, then restore it."""
    names = [v.name for v in mod.variables]
    subs = [m.name for m in mod.submodules]
    print(f'mod variables: {names}, submodules: {subs}')
    ckpt = tf.train.Checkpoint(module=mod)
    mgr = tf.train.CheckpointManager(ckpt, '/tmp/q/trackable', max_to_keep=2)
    mod()
    print(mod)
    mgr.save()
    # Mutate once more after saving so the restore is observable.
    mod()
    print(mod)
    latest = mgr.latest_checkpoint
    listed = tf.train.list_variables(latest)
    print(f'containers: {listed}')
    ckpt.restore(latest)
    print(f'restored: {mod}')
def graph(tracer):
    """Trace tracer() and export its graph for TensorBoard inspection."""
    stamp = datetime.now().strftime('%Y%m%d-%H%M%S')
    logdir = f'/tmp/q/logs/func/{stamp}'
    writer = tf.summary.create_file_writer(logdir)
    tf.summary.trace_on(graph=True)  # , profiler=True)
    tracer()
    with writer.as_default():
        tf.summary.trace_export(name="trace", step=0, profiler_outdir=logdir)
class Layer(tf.keras.layers.Layer):
    """Keras analogue of Module: adds its weight to the (sub)layer output."""

    def __init__(self, sub=None, **kw):
        super().__init__(**kw)
        self.sub = sub

    def __str__(self):
        text = f'n: {self.name}, v: {self.v.numpy()}'
        return text + (f', s: ({self.sub})' if self.sub else '')

    def build(self, input_shape):
        # Scalar int weight, initialized to one on first build.
        self.v = self.add_weight(name='l_v',
                                 shape=[],
                                 dtype=tf.int32,
                                 initializer=tf.ones_initializer)
        return super().build(input_shape)

    def call(self, x):
        inner = x if self.sub is None else self.sub(x)
        out = tf.math.add(inner, self.v)
        # Side effect: fold the whole output back into the weight.
        self.v.assign(tf.reduce_sum(out))
        return out
def models(mod, lay):
    """Exercise a keras Model twice around a save, then restore and inspect."""
    print(mod.summary())
    names = [v.name for v in mod.variables]
    trainables = [t.name for t in mod.trainable_variables]
    subs = [m.name for m in mod.submodules]
    print(f'lay variables: {names}, trainables: {trainables}, submodules: {subs}')
    data = tf.constant([100, 100])
    mod(data)
    print(lay)
    ckpt = tf.train.Checkpoint(model=mod)
    mgr = tf.train.CheckpointManager(ckpt, '/tmp/q/trackable', max_to_keep=2)
    mgr.save()
    # Mutate once more after saving so the restore is observable.
    mod(data)
    print(lay)
    latest = mgr.latest_checkpoint
    listed = tf.train.list_variables(latest)
    print(f'containers: {listed}')
    ckpt.restore(latest)
    print(f'restored: {lay}')
def main(_):
    """Run each trackable/checkpoint demo in sequence.

    The demos share the /tmp/q/trackable checkpoint directory, so order
    matters: each picks up where the previous one left off.
    """
    # Low-level Trackable: dependencies must be tracked by hand.
    tr1 = base.Trackable()
    v = tf.Variable(1)
    tr1._track_trackable(v, name='tr1_v')
    for _ in range(3):
        trackable(tr1, v)
    # AutoTrackable: attribute assignment tracks automatically,
    # unless suppressed by the no-tracking scope.
    tr2 = tracking.AutoTrackable()
    tracked, untracked = tf.Variable(1000), tf.Variable(0)
    tr2.v = tracked
    with base.no_automatic_dependency_tracking_scope(tr2):
        tr2.untracked = untracked
    for _ in range(2):
        autotrackable(tr2, tracked, untracked)
    listing()
    deleting(tr2)
    # Containers (list/dict) of trackables; br1/br2 appear in both.
    tr3 = tracking.AutoTrackable()
    br1 = tracking.AutoTrackable()
    br1.v = tf.Variable(5)
    br2 = tracking.AutoTrackable()
    br2.v = tf.Variable(5)
    tr3.br_list = [br1, br2]
    br3 = tracking.AutoTrackable()
    br3.v = tf.Variable(5)
    tr3.br_dict = {'br3': br3}
    containers(tr3)
    tr3.br_dict = {'br1': br1, 'br2': br2, 'br3': br3}
    sharing(tr3)
    # tf.Module chain m1 -> m2 -> m3.
    mod1 = Module('m1')
    mod1.sub = Module('m2')
    mod1.sub.sub = Module('m3')
    modules(mod1)
    # @tf.function
    # def tracer1():
    #     return mod1()
    # graph(tracer1)
    # Keras functional model wrapping nested Layers l1 -> l2 -> l3.
    ins = [tf.keras.Input(shape=(), dtype=tf.int32)]
    lay = Layer(name='l1', sub=Layer(name='l2', sub=Layer(name='l3')))
    outs = [lay(ins)]
    mod2 = tf.keras.Model(name='m2', inputs=ins, outputs=outs)
    models(mod2, lay)

    # Trace the keras model's graph and export it for TensorBoard.
    @tf.function
    def tracer2():
        return mod2(tf.constant([100, 100]))
    graph(tracer2)
if __name__ == '__main__':
    # absl.app handles flag parsing before dispatching to main.
    from absl import app
    app.run(main)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,513
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/config/big_bird.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from ... import core as qc
class PreTrained(qc.PreTrained):
    """Shared configuration/initialization base for BigBird models."""

    # Hyperparameter registry — presumably the first argument is the set of
    # names without defaults and the second the defaults for the rest; verify
    # against qc.Hypers.
    hs = qc.Hypers(
        {"drop_proj"},
        dict(
            act="gelu_new",
            attn_type="block_sparse",
            block_size=64,
            BOS=1,
            d_ff=3072,
            d_model=768,
            drop_attn=0.1,
            drop=0.1,
            EOS=2,
            grad_checkpoint=True,
            init_range=0.02,
            is_enc_dec=False,
            eps=1e-12,
            model_type="big_bird",
            n_heads=12,
            n_lays=12,
            n_pos=4096,
            n_rand_blocks=3,
            n_typ=2,
            PAD=0,
            pos_type="absolute",
            rescale=False,
            s_vocab=50358,
            SEP=66,
            use_bias=True,
            y_cache=True,
        ),
    )

    def _init_weights(self, module):
        # BERT-style initialization: normal(0, init_range) weights, zeroed
        # biases, unit LayerNorm gain; the padding embedding row is zeroed.
        if isinstance(module, qc.Linear):
            module.weight.data.normal_(mean=0.0, std=self.cfg.init_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, qc.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.cfg.init_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, qc.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_grad_checkpoint(self, module, value=False):
        # NOTE(review): BigBirdEncoder is not imported in this file — confirm
        # it is resolvable when this is called, or this raises NameError.
        if isinstance(module, BigBirdEncoder):
            module.grad_checkpoint = value
# Per-checkpoint overrides applied on top of PreTrained.hs defaults.
# Keys are Hugging Face model identifiers; values list only the hypers
# that differ from the base configuration above.
MAP = {
    "google/bigbird-roberta-base": dict(
        archs=["ForPreTraining"],
        grad_checkpoint=False,
    ),
    "google/bigbird-roberta-large": dict(
        archs=["ForMasked"],
        d_ff=4096,
        d_model=1024,
        grad_checkpoint=False,
        n_heads=16,
        n_lays=24,
    ),
    "google/bigbird-base-trivia-itc": dict(
        archs=["ForQA"],
        grad_checkpoint=False,
        n_typ=16,  # TriviaQA checkpoint uses 16 token types
    ),
}
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,514
|
quantapix/qnarre
|
refs/heads/main
|
/notebooks/old/src/masking.py
|
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# !pip install -U tf-nightly-2.0-preview
from datetime import datetime
import tensorflow as tf
import dataset as qd
ks = tf.keras
kl = ks.layers
@tf.function
def adapter(d, len_max_input):
    """Turn one parsed feature batch into a padded (input, target) pair.

    The ragged 'defs' and 'op' features are joined around a SEP column,
    densified, and right-padded to len_max_input; the target is the first
    'res' token of each row.
    """
    defs = tf.RaggedTensor.from_sparse(d['defs'])
    ops = tf.RaggedTensor.from_sparse(d['op'])
    sep_col = tf.fill([defs.nrows(), 1], qd.SEP)
    inp = tf.concat([defs, sep_col, ops], axis=1).to_tensor()
    pad_len = len_max_input - tf.shape(inp)[-1]
    inp = tf.pad(inp, [[0, 0], [0, pad_len]])
    tgt = tf.RaggedTensor.from_sparse(d['res'])[:, :1].to_tensor()
    return inp, tgt
def dset_for(ps):
    """Build the batched training dataset described by params ps."""
    # Feature spec for the serialized TFRecord examples.
    feats = {
        'defs': tf.io.VarLenFeature(tf.int64),
        'op': tf.io.VarLenFeature(tf.int64),
        'res': tf.io.VarLenFeature(tf.int64),
    }
    ds = tf.data.TFRecordDataset(list(qd.files(ps)))
    ds = ds.batch(ps.dim_batch)
    ds = ds.map(lambda r: tf.io.parse_example(r, feats))
    ds = ds.map(qd.caster)
    return ds.map(lambda d: adapter(d, tf.constant(ps.len_max_input)))
class Layer(kl.Layer):
    """Common base for the layers below; opts in to Keras mask propagation."""

    def __init__(self, **kw):
        super().__init__(**kw)
        # Allow masks produced upstream to flow through this layer.
        self.supports_masking = True
class Masking(Layer):
    """Produces a boolean mask marking non-zero (non-padding) positions."""

    def __init__(self):
        super().__init__()

    def compute_mask(self, x, mask=None):
        # Zero tokens are padding; everything else is kept.
        return tf.not_equal(x, 0)

    def call(self, x):
        # Identity on values — only the mask computed above matters.
        return x
class Embed(Layer):
    """Token embedding lookup that zeroes embeddings at masked positions."""

    def __init__(self, ps):
        super().__init__(dtype=tf.float32)
        shape = (ps.dim_vocab, ps.dim_hidden)
        self.emb = self.add_weight(name='emb', shape=shape)

    def call(self, x, mask=None):
        out = tf.nn.embedding_lookup(self.emb, x)
        if mask is None:
            return out
        # Broadcast the (batch, seq) mask across the hidden dimension.
        return out * tf.cast(mask, tf.float32)[:, :, None]
class Reflect(Layer):
    """Single-head self-attention over the input sequence."""

    def build(self, shape):
        d = shape[-1]
        self.scale = 1 / (d**0.5)
        # Learned query/key/value projections, each (d, d).
        self.q = self.add_weight(name='q', shape=(d, d))
        self.k = self.add_weight(name='k', shape=(d, d))
        self.v = self.add_weight(name='v', shape=(d, d))
        return super().build(shape)

    def call(self, x, mask=None):
        queries = tf.einsum('bsi,ij->bsj', x, self.q)
        keys = tf.einsum('bsi,ij->bsj', x, self.k)
        scores = tf.einsum('bsi,bzi->bsz', queries, keys) * self.scale
        if mask is not None:
            # Push scores at padded positions far negative before softmax
            # (note: the mask is applied along the query axis here).
            neg = tf.cast(tf.logical_not(mask), tf.float32)[:, :, None]
            scores += neg * -1e9
        values = tf.einsum('bsi,ij->bsj', x, self.v)
        return tf.einsum('bsz,bzi->bsi', tf.nn.softmax(scores), values)
def model_for(ps):
    """Wire up and compile the masking demo model."""
    inp = ks.Input(shape=(ps.len_max_input, ), dtype='int32')
    h = Masking()(inp)
    h = Embed(ps)(h)
    h = Reflect()(h)
    # Flatten (seq, hidden) so dense heads see one vector per example.
    h = kl.Reshape((ps.len_max_input * ps.dim_hidden, ))(h)
    h = kl.Dense(ps.dim_dense, activation='relu')(h)
    logits = kl.Dense(ps.dim_vocab, name='dbd', activation=None)(h)
    m = ks.Model(inputs=inp, outputs=logits)
    m.compile(optimizer=ps.optimizer, loss=ps.loss, metrics=[ps.metric])
    print(m.summary())
    return m
def main_graph(ps, ds, m):
    """Trains model `m` on dataset `ds`, logging to a timestamped TensorBoard dir."""
    stamp = datetime.now().strftime('%Y%m%d-%H%M%S')
    log_dir = f'/tmp/q/logs/{stamp}'
    callbacks = [ks.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)]
    m.fit(ds, callbacks=callbacks, epochs=ps.num_epochs)
# Hyper-parameters for the toy run; consumed via qd.Params in the main guard.
params = dict(
    dim_batch=2,
    dim_dense=150,
    dim_hidden=15,
    dim_vocab=len(qd.vocab),
    len_max_input=20,
    # from_logits=True: the model's final Dense layer has no activation.
    loss=ks.losses.SparseCategoricalCrossentropy(from_logits=True),
    metric=ks.metrics.SparseCategoricalCrossentropy(from_logits=True),
    num_epochs=10,
    num_shards=2,
    optimizer=ks.optimizers.Adam(),
)
# Script entry point: wrap the params, then build the dataset and model and train.
if __name__ == '__main__':
    ps = qd.Params(**params)
    main_graph(ps, dset_for(ps), model_for(ps))
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,515
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/models/funnel.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import functional as F
from transformers.utils import logging
from .. import core as qc
from ..core import utils as qu
from ..core import forward as qf
from ..core import output as qo
from ..core import attention as qa
from ..core.embed import Embed
from ..core.mlp import Classifier, MLP, Predictor, Pool
from ..prep.config.funnel import PreTrained
log = logging.get_logger(__name__)
from torch.nn import CrossEntropyLoss
# Published Funnel Transformer checkpoints. "Bx-y-z" encodes the block layout,
# "H…" the hidden size; "-base" variants ship without the decoder.
LIST = [
    "funnel-transformer/small",  # B4-4-4H768
    "funnel-transformer/small-base",  # B4-4-4H768, no decoder
    "funnel-transformer/medium",  # B6-3x2-3x2H768
    "funnel-transformer/medium-base",  # B6-3x2-3x2H768, no decoder
    "funnel-transformer/intermediate",  # B6-6-6H768
    "funnel-transformer/intermediate-base",  # B6-6-6H768, no decoder
    "funnel-transformer/large",  # B8-8-8H1024
    "funnel-transformer/large-base",  # B8-8-8H1024, no decoder
    "funnel-transformer/xlarge-base",  # B10-10-10H1024, no decoder
    "funnel-transformer/xlarge",  # B10-10-10H1024
]
# Large constant used as an effective infinity when masking attention scores.
INF = 1e6
class FunnelEmbeddings(qc.Module):
    """Word embeddings followed by LayerNorm and dropout.

    No position embeddings are added here; Funnel injects positional
    information inside the attention layers instead.
    """

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = qc.Embed(config.s_vocab, config.d_model, padding_idx=config.PAD)
        self.layer_norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)

    def forward(self, input_ids=None, inputs_embeds=None):
        # Only look up ids when the caller did not supply embeddings directly.
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        return self.drop(self.layer_norm(inputs_embeds))
class FunnelAttentionStructure(qc.Module):
    """Builds and maintains the shared inputs of Funnel attention layers.

    Precomputes positional embeddings, the token-type comparison matrix,
    the attention mask and the <cls> mask, and pools them in step with the
    hidden states as the sequence length shrinks from block to block.
    """

    # Token-type id reserved for <cls>; treated as matching both segments.
    cls_token_type_id = 2

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.sin_dropout = qc.Dropout(config.drop)
        self.cos_dropout = qc.Dropout(config.drop)
        # Running product of pooling strides applied so far; reset by
        # init_attention_inputs.
        self.pooling_mult = None

    def init_attention_inputs(
        self,
        inputs_embeds,
        attention_mask=None,
        token_type_ids=None,
    ):
        """Returns (position_embeds, token_type_mat, attention_mask, cls_mask)
        for the full-length input."""
        self.pooling_mult = 1
        self.seq_len = seq_len = inputs_embeds.size(1)
        position_embeds = self.get_position_embeds(
            seq_len, inputs_embeds.dtype, inputs_embeds.device
        )
        token_type_mat = (
            self.token_type_ids_to_mat(token_type_ids) if token_type_ids is not None else None
        )
        # 0 on the <cls> row/column, 1 elsewhere (only when <cls> is special).
        cls_mask = (
            F.pad(inputs_embeds.new_ones([seq_len - 1, seq_len - 1]), (1, 0, 1, 0))
            if self.config.separate_cls
            else None
        )
        return (position_embeds, token_type_mat, attention_mask, cls_mask)

    def token_type_ids_to_mat(self, token_type_ids):
        """Boolean [batch, seq, seq] matrix: True where two positions share a
        segment, with <cls> matching every position."""
        token_type_mat = token_type_ids[:, :, None] == token_type_ids[:, None]
        # Treat <cls> as in the same segment as both A & B
        cls_ids = token_type_ids == self.cls_token_type_id
        cls_mat = cls_ids[:, :, None] | cls_ids[:, None]
        return cls_mat | token_type_mat

    def get_position_embeds(self, seq_len, dtype, device):
        """Builds the positional inputs of the relative attention.

        "factorized" attention gets the (phi, pi, psi, omega) tensors of the
        factorized formula; otherwise each block gets the gathered relative
        sinusoidal embeddings for the non-pooled and pooled cases.
        """
        d_model = self.config.d_model
        if self.config.attention_type == "factorized":
            pos_seq = torch.arange(0, seq_len, 1.0, dtype=dtype, device=device)
            freq_seq = torch.arange(0, d_model // 2, 1.0, dtype=dtype, device=device)
            inv_freq = 1 / (10000 ** (freq_seq / (d_model // 2)))
            sinusoid = pos_seq[:, None] * inv_freq[None]
            sin_embed = torch.sin(sinusoid)
            sin_embed_d = self.sin_dropout(sin_embed)
            cos_embed = torch.cos(sinusoid)
            cos_embed_d = self.cos_dropout(cos_embed)
            # This is different from the formula on the paper...
            phi = torch.cat([sin_embed_d, sin_embed_d], dim=-1)
            psi = torch.cat([cos_embed, sin_embed], dim=-1)
            pi = torch.cat([cos_embed_d, cos_embed_d], dim=-1)
            omega = torch.cat([-sin_embed, cos_embed], dim=-1)
            return (phi, pi, psi, omega)
        else:
            freq_seq = torch.arange(0, d_model // 2, 1.0, dtype=dtype, device=device)
            inv_freq = 1 / (10000 ** (freq_seq / (d_model // 2)))
            # Maximum relative positions for the first input
            rel_pos_id = torch.arange(-seq_len * 2, seq_len * 2, 1.0, dtype=dtype, device=device)
            zero_offset = seq_len * 2
            sinusoid = rel_pos_id[:, None] * inv_freq[None]
            sin_embed = self.sin_dropout(torch.sin(sinusoid))
            cos_embed = self.cos_dropout(torch.cos(sinusoid))
            pos_embed = torch.cat([sin_embed, cos_embed], dim=-1)
            pos = torch.arange(0, seq_len, dtype=dtype, device=device)
            pooled_pos = pos
            position_embeds_list = []
            for block_index in range(0, self.config.num_blocks):
                # First type: embeddings seen after this block's pooling
                # (None for block 0, which does not pool).
                if block_index == 0:
                    position_embeds_pooling = None
                else:
                    pooled_pos = self.stride_pool_pos(pos, block_index)
                    # construct rel_pos_id
                    stride = 2 ** (block_index - 1)
                    rel_pos = self.relative_pos(pos, stride, pooled_pos, shift=2)
                    # zero_offset maps signed positions to gather indices.
                    rel_pos = rel_pos[:, None] + zero_offset
                    rel_pos = rel_pos.expand(rel_pos.size(0), d_model)
                    position_embeds_pooling = torch.gather(pos_embed, 0, rel_pos)
                # Second type
                pos = pooled_pos
                stride = 2**block_index
                rel_pos = self.relative_pos(pos, stride)
                rel_pos = rel_pos[:, None] + zero_offset
                rel_pos = rel_pos.expand(rel_pos.size(0), d_model)
                position_embeds_no_pooling = torch.gather(pos_embed, 0, rel_pos)
                position_embeds_list.append([position_embeds_no_pooling, position_embeds_pooling])
            return position_embeds_list

    def stride_pool_pos(self, pos_id, block_index):
        """Pools position ids with stride 2, keeping <cls> separate when configured."""
        if self.config.separate_cls:
            # Under separate_cls, <cls> keeps a dedicated (negative) position.
            cls_pos = pos_id.new_tensor([-(2**block_index) + 1])
            pooled_pos_id = pos_id[1:-1] if self.config.truncate_seq else pos_id[1:]
            return torch.cat([cls_pos, pooled_pos_id[::2]], 0)
        else:
            return pos_id[::2]

    def relative_pos(self, pos, stride, pooled_pos=None, shift=1):
        """Enumerates relative positions (with the given stride) between `pos`
        and `pooled_pos`, from largest to smallest distance."""
        if pooled_pos is None:
            pooled_pos = pos
        ref_point = pooled_pos[0] - pos[0]
        num_remove = shift * len(pooled_pos)
        max_dist = ref_point + num_remove * stride
        min_dist = pooled_pos[0] - pos[-1]
        return torch.arange(max_dist, min_dist - 1, -stride, dtype=torch.long, device=pos.device)

    def stride_pool(self, tensor, axis):
        """Stride-2 pooling of `tensor` along `axis`; recurses over lists or
        tuples of axes and of tensors. None passes through."""
        if tensor is None:
            return None
        # Multiple axes: pool along each in turn.
        if isinstance(axis, (list, tuple)):
            for ax in axis:
                tensor = self.stride_pool(tensor, ax)
            return tensor
        if isinstance(tensor, (tuple, list)):
            return type(tensor)(self.stride_pool(x, axis) for x in tensor)
        axis %= tensor.ndim
        axis_slice = (
            slice(None, -1, 2)
            if self.config.separate_cls and self.config.truncate_seq
            else slice(None, None, 2)
        )
        enc_slice = [slice(None)] * axis + [axis_slice]
        if self.config.separate_cls:
            # Re-prepend <cls> so it always survives pooling.
            cls_slice = [slice(None)] * axis + [slice(None, 1)]
            tensor = torch.cat([tensor[cls_slice], tensor], axis=axis)
        return tensor[enc_slice]

    def pool_tensor(
        self,
        tensor,
        mode="mean",
        stride=2,
    ):
        """Pools `tensor` (or each tensor of a tuple/list) along its sequence
        dimension with mean/max/min pooling at the given stride."""
        if tensor is None:
            return None
        if isinstance(tensor, (tuple, list)):
            # BUG FIX: the original recursed with `tensor` (the whole
            # container) instead of each element `x`, recursing forever on
            # any tuple/list input.
            return type(tensor)(self.pool_tensor(x, mode=mode, stride=stride) for x in tensor)
        if self.config.separate_cls:
            suffix = tensor[:, :-1] if self.config.truncate_seq else tensor
            tensor = torch.cat([tensor[:, :1], suffix], dim=1)
        ndim = tensor.ndim
        # Lift 2-D/3-D inputs to 4-D so the 2-D pooling ops apply.
        if ndim == 2:
            tensor = tensor[:, None, :, None]
        elif ndim == 3:
            tensor = tensor[:, None, :, :]
        # Stride is applied on the second-to-last dimension.
        stride = (stride, 1)
        if mode == "mean":
            tensor = F.avg_pool2d(tensor, stride, stride=stride, ceil_mode=True)
        elif mode == "max":
            tensor = F.max_pool2d(tensor, stride, stride=stride, ceil_mode=True)
        elif mode == "min":
            tensor = -F.max_pool2d(-tensor, stride, stride=stride, ceil_mode=True)
        else:
            raise NotImplementedError("The supported modes are 'mean', 'max' and 'min'.")
        # Undo the dimension lifting.
        if ndim == 2:
            return tensor[:, 0, :, 0]
        elif ndim == 3:
            return tensor[:, 0]
        return tensor

    def pre_attention_pooling(self, output, attention_inputs):
        """Pools `output` and the attention inputs before an attention layer
        that shrinks the sequence (queries only when pool_q_only)."""
        position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
        if self.config.pool_q_only:
            if self.config.attention_type == "factorized":
                position_embeds = self.stride_pool(position_embeds[:2], 0) + position_embeds[2:]
            token_type_mat = self.stride_pool(token_type_mat, 1)
            cls_mask = self.stride_pool(cls_mask, 0)
            output = self.pool_tensor(output, mode=self.config.pooling_type)
        else:
            self.pooling_mult *= 2
            if self.config.attention_type == "factorized":
                position_embeds = self.stride_pool(position_embeds, 0)
            token_type_mat = self.stride_pool(token_type_mat, [1, 2])
            cls_mask = self.stride_pool(cls_mask, [1, 2])
            # "min": a pooled position stays masked if any constituent was.
            attention_mask = self.pool_tensor(attention_mask, mode="min")
            output = self.pool_tensor(output, mode=self.config.pooling_type)
        attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask)
        return output, attention_inputs

    def post_attention_pooling(self, attention_inputs):
        """Pools the key/value side of the attention inputs after a
        pool-q-only attention layer so queries and keys stay consistent."""
        position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
        if self.config.pool_q_only:
            self.pooling_mult *= 2
            if self.config.attention_type == "factorized":
                position_embeds = position_embeds[:2] + self.stride_pool(position_embeds[2:], 0)
            token_type_mat = self.stride_pool(token_type_mat, 2)
            cls_mask = self.stride_pool(cls_mask, 1)
            attention_mask = self.pool_tensor(attention_mask, mode="min")
        attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask)
        return attention_inputs
def _relative_shift_gather(positional_attn, context_len, shift):
batch_size, n_heads, seq_len, max_rel_len = positional_attn.shape
positional_attn = torch.reshape(positional_attn, [batch_size, n_heads, max_rel_len, seq_len])
positional_attn = positional_attn[:, :, shift:, :]
positional_attn = torch.reshape(
positional_attn, [batch_size, n_heads, seq_len, max_rel_len - shift]
)
positional_attn = positional_attn[..., :context_len]
return positional_attn
class FunnelRelMultiheadAttention(qc.Module):
    """Multi-head attention with Funnel-Transformer relative position and segment biases.

    Implements both the "factorized" attention variant and the full relative-shift
    variant described in the Funnel-Transformer paper
    (https://arxiv.org/abs/2006.03236), appendix A.2.
    """

    def __init__(self, config, block_index):
        super().__init__()
        self.config = config
        self.block_index = block_index
        d_model, n_heads, d_head = config.d_model, config.n_heads, config.d_head
        self.drop = qc.Dropout(config.drop)
        self.drop_attn = qc.Dropout(config.drop_attn)
        # Query projection carries no bias; the learned biases below play that role.
        self.q_head = qc.Linear(d_model, n_heads * d_head, bias=False)
        self.k_head = qc.Linear(d_model, n_heads * d_head)
        self.v_head = qc.Linear(d_model, n_heads * d_head)
        # Content / position / segment biases (r_w, r_r, r_s in Transformer-XL
        # notation) plus the positional kernel and segment embedding.
        self.r_w_bias = nn.Parameter(torch.zeros([n_heads, d_head]))
        self.r_r_bias = nn.Parameter(torch.zeros([n_heads, d_head]))
        self.r_kernel = nn.Parameter(torch.zeros([d_model, n_heads, d_head]))
        self.r_s_bias = nn.Parameter(torch.zeros([n_heads, d_head]))
        self.seg_embed = nn.Parameter(torch.zeros([2, n_heads, d_head]))
        self.post_proj = qc.Linear(n_heads * d_head, d_model)
        self.layer_norm = qc.LayerNorm(d_model, eps=config.eps)
        self.scale = 1.0 / (d_head**0.5)

    def relative_positional_attention(self, position_embeds, q_head, context_len, cls_mask=None):
        """Relative attention score for the positional encodings."""
        if self.config.attention_type == "factorized":
            # Factorized variant: positional information arrives as four
            # components (phi, pi, psi, omega) combined via two einsums.
            phi, pi, psi, omega = position_embeds
            # Shape n_heads x d_head
            u = self.r_r_bias * self.scale
            # Shape d_model x n_heads x d_head
            w_r = self.r_kernel
            # Shape batch_size x seq_len x n_heads x d_model
            q_r_attention = torch.einsum("binh,dnh->bind", q_head + u, w_r)
            q_r_attention_1 = q_r_attention * phi[:, None]
            q_r_attention_2 = q_r_attention * pi[:, None]
            # Shape batch_size x n_heads x seq_len x context_len
            positional_attn = torch.einsum("bind,jd->bnij", q_r_attention_1, psi) + torch.einsum(
                "bind,jd->bnij", q_r_attention_2, omega
            )
        else:
            # A stride-2 shift is needed whenever the queries have been pooled
            # but the keys have not (their lengths differ).
            shift = 2 if q_head.shape[1] != context_len else 1
            # Notations from the paper, appendix A.2.1, final formula (https://arxiv.org/abs/2006.03236)
            # Grab the proper positional encoding, shape max_rel_len x d_model
            r = position_embeds[self.block_index][shift - 1]
            # Shape n_heads x d_head
            v = self.r_r_bias * self.scale
            # Shape d_model x n_heads x d_head
            w_r = self.r_kernel
            # Shape max_rel_len x n_heads x d_model
            r_head = torch.einsum("td,dnh->tnh", r, w_r)
            # Shape batch_size x n_heads x seq_len x max_rel_len
            positional_attn = torch.einsum("binh,tnh->bnit", q_head + v, r_head)
            # Shape batch_size x n_heads x seq_len x context_len
            positional_attn = _relative_shift_gather(positional_attn, context_len, shift)
        if cls_mask is not None:
            # Zero out pairs involving the [CLS] token when it is kept separate.
            positional_attn *= cls_mask
        return positional_attn

    def relative_token_type_attention(self, token_type_mat, q_head, cls_mask=None):
        """Relative attention score for the token_type_ids"""
        if token_type_mat is None:
            return 0
        batch_size, seq_len, context_len = token_type_mat.shape
        # q_head has shape batch_size x seq_len x n_heads x d_head
        # Shape n_heads x d_head
        r_s_bias = self.r_s_bias * self.scale
        # Shape batch_size x n_heads x seq_len x 2
        token_type_bias = torch.einsum("bind,snd->bnis", q_head + r_s_bias, self.seg_embed)
        # Shape batch_size x n_heads x seq_len x context_len
        token_type_mat = token_type_mat[:, None].expand(
            [batch_size, q_head.shape[2], seq_len, context_len]
        )
        # Shapes batch_size x n_heads x seq_len
        diff_token_type, same_token_type = torch.split(token_type_bias, 1, dim=-1)
        # Shape batch_size x n_heads x seq_len x context_len
        # Pick the "same segment" bias where the pair shares a token type,
        # the "different segment" bias otherwise.
        token_type_attn = torch.where(
            token_type_mat,
            same_token_type.expand(token_type_mat.shape),
            diff_token_type.expand(token_type_mat.shape),
        )
        if cls_mask is not None:
            token_type_attn *= cls_mask
        return token_type_attn

    def forward(
        self,
        query,
        key,
        value,
        attention_inputs,
        output_attentions=False,
    ):
        # Queries may be shorter than keys/values when the sequence was pooled
        # before attention (pool_q_only).
        position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
        batch_size, seq_len, _ = query.shape
        context_len = key.shape[1]
        n_heads, d_head = self.config.n_heads, self.config.d_head
        # Shape batch_size x seq_len x n_heads x d_head
        q_head = self.q_head(query).view(batch_size, seq_len, n_heads, d_head)
        # Shapes batch_size x context_len x n_heads x d_head
        k_head = self.k_head(key).view(batch_size, context_len, n_heads, d_head)
        v_head = self.v_head(value).view(batch_size, context_len, n_heads, d_head)
        q_head = q_head * self.scale
        # Shape n_heads x d_head
        r_w_bias = self.r_w_bias * self.scale
        # Shapes batch_size x n_heads x seq_len x context_len
        content_score = torch.einsum("bind,bjnd->bnij", q_head + r_w_bias, k_head)
        positional_attn = self.relative_positional_attention(
            position_embeds, q_head, context_len, cls_mask
        )
        token_type_attn = self.relative_token_type_attention(token_type_mat, q_head, cls_mask)
        # merge attention scores
        attn_score = content_score + positional_attn + token_type_attn
        # precision safe in case of mixed precision training
        dtype = attn_score.dtype
        attn_score = attn_score.float()
        # perform masking
        if attention_mask is not None:
            # INF is a large constant defined elsewhere in this module; masked
            # positions get a very negative score before the softmax.
            attn_score = attn_score - INF * (1 - attention_mask[:, None, None].float())
        # attention probability
        attn_prob = torch.softmax(attn_score, dim=-1, dtype=dtype)
        attn_prob = self.drop_attn(attn_prob)
        # attention output, shape batch_size x seq_len x n_heads x d_head
        attn_vec = torch.einsum("bnij,bjnd->bind", attn_prob, v_head)
        # Shape batch_size x seq_len x d_model
        attn_out = self.post_proj(attn_vec.reshape(batch_size, seq_len, n_heads * d_head))
        attn_out = self.drop(attn_out)
        # Residual connection followed by layer norm.
        output = self.layer_norm(query + attn_out)
        return (output, attn_prob) if output_attentions else (output,)
class FunnelPositionwiseFFN(qc.Module):
    """Position-wise feed-forward sublayer with a residual connection and layer norm."""

    def __init__(self, config):
        super().__init__()
        # Expand to the inner dimension, apply the configured activation, then
        # project back down to the model dimension.
        self.linear_1 = qc.Linear(config.d_model, config.d_inner)
        self.act = qu.activation(config.act)
        self.drop_act = qc.Dropout(config.drop_act)
        self.linear_2 = qc.Linear(config.d_inner, config.d_model)
        self.drop = qc.Dropout(config.drop)
        self.norm = qc.LayerNorm(config.d_model, config.eps)

    def forward(self, hidden):
        # Inner projection with activation and dropout.
        y = self.drop_act(self.act(self.linear_1(hidden)))
        # Back-projection, dropout, then the residual add and normalization.
        y = self.drop(self.linear_2(y))
        return self.norm(hidden + y)
class Layer(qc.Module):
    """One encoder/decoder layer: relative multi-head attention then a position-wise FFN."""

    def __init__(self, config, block_index):
        super().__init__()
        self.attention = FunnelRelMultiheadAttention(config, block_index)
        self.ffn = FunnelPositionwiseFFN(config)

    def forward(
        self,
        query,
        key,
        value,
        attention_inputs,
        output_attentions=False,
    ):
        attn_out = self.attention(
            query, key, value, attention_inputs, output_attentions=output_attentions
        )
        y = self.ffn(attn_out[0])
        if output_attentions:
            # Surface the attention probabilities returned by the sublayer too.
            return (y, attn_out[1])
        return (y,)
class Encoder(qc.Module):
    """Stack of funnel blocks whose sequence length shrinks between blocks."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.attention_structure = FunnelAttentionStructure(config)
        # One list of layers per block; config.block_repeats lets a layer be
        # applied several times without duplicating its parameters.
        self.blocks = nn.ModuleList(
            [
                nn.ModuleList([Layer(config, block_index) for _ in range(block_size)])
                for block_index, block_size in enumerate(config.block_sizes)
            ]
        )

    def forward(
        self,
        inputs_embeds,
        attention_mask=None,
        token_type_ids=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        # The pooling is not implemented on long tensors, so we convert this mask.
        attention_mask = attention_mask.type_as(inputs_embeds)
        attention_inputs = self.attention_structure.init_attention_inputs(
            inputs_embeds,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
        )
        hidden = inputs_embeds
        all_hidden_states = (inputs_embeds,) if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for block_index, block in enumerate(self.blocks):
            # Pool only while something is left to pool (more than the optional
            # separate [CLS] token), and never before the first block.
            pooling_flag = hidden.size(1) > (2 if self.config.separate_cls else 1)
            pooling_flag = pooling_flag and block_index > 0
            if pooling_flag:
                pooled_model, attention_inputs = self.attention_structure.pre_attention_pooling(
                    hidden, attention_inputs
                )
            for layer_index, layer in enumerate(block):
                for repeat_index in range(self.config.block_repeats[block_index]):
                    # Pooling happens only on the very first application of the
                    # first layer of a block.
                    do_pooling = (repeat_index == 0) and (layer_index == 0) and pooling_flag
                    if do_pooling:
                        query = pooled_model
                        # With pool_q_only, keys/values stay at full resolution.
                        key = value = hidden if self.config.pool_q_only else pooled_model
                    else:
                        query = key = value = hidden
                    layer_output = layer(
                        query, key, value, attention_inputs, output_attentions=output_attentions
                    )
                    hidden = layer_output[0]
                    if do_pooling:
                        attention_inputs = self.attention_structure.post_attention_pooling(
                            attention_inputs
                        )
                    if output_attentions:
                        all_attentions = all_attentions + layer_output[1:]
                    if output_hidden_states:
                        all_hidden_states = all_hidden_states + (hidden,)
        if not return_dict:
            return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None)
        return qo.Base(y=hidden, hiddens=all_hidden_states, attns=all_attentions)
def upsample(
    x,
    stride,
    target_len,
    separate_cls=True,
    truncate_seq=False,
):
    """Upsample `x` along the sequence axis by repeating each position `stride` times.

    When `separate_cls` is set, the first ([CLS]) position is excluded from the
    repetition and re-attached at the front; `truncate_seq` additionally pads the
    tail to mirror the truncation applied on the way down. The result is cut to
    `target_len` positions.
    """
    if stride == 1:
        return x
    if separate_cls:
        # Keep the [CLS] position out of the upsampling; reattach it afterwards.
        cls_tok = x[:, :1]
        rest = x[:, 1:]
    else:
        rest = x
    out = torch.repeat_interleave(rest, repeats=stride, dim=1)
    if separate_cls:
        if truncate_seq:
            # Pad the tail with stride - 1 zero positions (pad spec is per-dim,
            # last dimension first).
            out = F.pad(out, (0, 0, 0, stride - 1, 0, 0))
        out = torch.cat([cls_tok, out[:, : target_len - 1]], dim=1)
    else:
        out = out[:, :target_len]
    return out
class Decoder(qc.Module):
    """Upsamples the funnel encoder output back to full length and refines it."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.attention_structure = FunnelAttentionStructure(config)
        # Decoder layers all use block_index 0 (full-resolution positions).
        self.layers = nn.ModuleList([Layer(config, 0) for _ in range(config.n_dec_lays)])

    def forward(
        self,
        final_hidden,
        first_block_hidden,
        attention_mask=None,
        token_type_ids=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        # Undo the pooling of all but the first block (factor of 2 per pooled
        # block), then add the skip connection from the end of the first block.
        upsampled_model = upsample(
            final_hidden,
            stride=2 ** (len(self.config.block_sizes) - 1),
            target_len=first_block_hidden.shape[1],
            separate_cls=self.config.separate_cls,
            truncate_seq=self.config.truncate_seq,
        )
        hidden = upsampled_model + first_block_hidden
        all_hidden_states = (hidden,) if output_hidden_states else None
        all_attentions = () if output_attentions else None
        attention_inputs = self.attention_structure.init_attention_inputs(
            hidden,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
        )
        for layer in self.layers:
            layer_output = layer(
                hidden, hidden, hidden, attention_inputs, output_attentions=output_attentions
            )
            hidden = layer_output[0]
            if output_attentions:
                all_attentions = all_attentions + layer_output[1:]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden,)
        if not return_dict:
            return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None)
        return qo.Base(y=hidden, hiddens=all_hidden_states, attns=all_attentions)
class FunnelDiscriminatorPredictions(qc.Module):
    """Prediction head for the ELECTRA-style replaced-token-detection objective.

    Projects the discriminator hidden states through a dense layer plus the
    configured activation, then down to a single logit per token.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.dense = qc.Linear(config.d_model, config.d_model)
        self.dense_prediction = qc.Linear(config.d_model, 1)

    def forward(self, discriminator_hidden_states):
        hiddens = self.dense(discriminator_hidden_states)
        hiddens = qu.activation(self.config.act)(hiddens)
        # Fix: squeeze only the trailing singleton dimension. A bare `.squeeze()`
        # also removed the batch dimension when batch_size == 1, silently changing
        # the logits shape from (1, seq_len) to (seq_len,).
        logits = self.dense_prediction(hiddens).squeeze(-1)
        return logits
class FunnelBaseModel(PreTrained):
    """Funnel encoder without the decoder: embeddings followed by the pooling encoder."""

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = FunnelEmbeddings(config)
        self.encoder = Encoder(config)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        cfg = self.config
        # Fall back to the configuration defaults when flags are not given.
        if output_attentions is None:
            output_attentions = cfg.output_attentions
        if output_hidden_states is None:
            output_hidden_states = cfg.output_hidden_states
        if return_dict is None:
            return_dict = cfg.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # Default to an all-ones mask and all-zeros token types.
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # TODO: deal with head_mask
        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        return self.encoder(
            inputs_embeds,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
class Model(PreTrained):
    """Full Funnel model: embeddings, pooling encoder and upsampling decoder."""

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embeddings = FunnelEmbeddings(config)
        self.encoder = Encoder(config)
        self.decoder = Decoder(config)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # TODO: deal with head_mask
        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        # Hidden states are always requested from the encoder because the
        # decoder needs the state at the end of the first block.
        encoder_outputs = self.encoder(
            inputs_embeds,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            output_attentions=output_attentions,
            output_hidden_states=True,
            return_dict=return_dict,
        )
        # hiddens[0] is the embedding output, so index block_sizes[0] is the
        # hidden state right after the first block (full sequence length).
        decoder_outputs = self.decoder(
            final_hidden=encoder_outputs[0],
            first_block_hidden=encoder_outputs[1][self.config.block_sizes[0]],
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if not return_dict:
            # Concatenate encoder and decoder extras in tuple form.
            idx = 0
            outputs = (decoder_outputs[0],)
            if output_hidden_states:
                idx += 1
                outputs = outputs + (encoder_outputs[1] + decoder_outputs[idx],)
            if output_attentions:
                idx += 1
                outputs = outputs + (encoder_outputs[2] + decoder_outputs[idx],)
            return outputs
        return qo.Base(
            y=decoder_outputs[0],
            hiddens=(encoder_outputs.hiddens + decoder_outputs.hiddens)
            if output_hidden_states
            else None,
            attns=(encoder_outputs.attns + decoder_outputs.attns) if output_attentions else None,
        )
class ForPreTraining(PreTrained):
    """Funnel model with the ELECTRA-style replaced-token-detection head."""

    def __init__(self, config):
        super().__init__(config)
        self.funnel = Model(config)
        self.discriminator_predictions = FunnelDiscriminatorPredictions(config)
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        # labels: per-token 0/1 targets marking whether a token was replaced
        # (presumably — verify against the pre-training data pipeline).
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        discriminator_hidden_states = self.funnel(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        discriminator_sequence_output = discriminator_hidden_states[0]
        logits = self.discriminator_predictions(discriminator_sequence_output)
        loss = None
        if labels is not None:
            loss_fct = nn.BCEWithLogitsLoss()
            if attention_mask is not None:
                # Restrict the binary cross-entropy to non-padding positions.
                active_loss = attention_mask.view(-1, discriminator_sequence_output.shape[1]) == 1
                active_logits = logits.view(-1, discriminator_sequence_output.shape[1])[active_loss]
                active_labels = labels[active_loss]
                loss = loss_fct(active_logits, active_labels.float())
            else:
                loss = loss_fct(
                    logits.view(-1, discriminator_sequence_output.shape[1]), labels.float()
                )
        if not return_dict:
            output = (logits,) + discriminator_hidden_states[1:]
            return ((loss,) + output) if loss is not None else output
        return qo.WithLoss(
            loss=loss,
            logits=logits,
            hiddens=discriminator_hidden_states.hiddens,
            attns=discriminator_hidden_states.attns,
        )
class ForMasked(PreTrained):
    """Funnel model with a masked-language-modeling projection head."""

    def __init__(self, **kw):
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        # NOTE(review): `Model` defined in this file takes a positional `config`;
        # this kw-style call presumably targets a compatible constructor — verify.
        self.model = Model(add_pool=False, **kw)
        # Projects hidden states to vocabulary logits.
        self.proj = qc.Linear(cfg.d_model, cfg.s_vocab, **kw)

    forward = qf.forward_masked
class ForChoice(PreTrained):
    """Funnel model with a multiple-choice classification head.

    Inputs have shape (batch, num_choices, seq_len); choices are flattened into
    the batch dimension, scored independently, then reshaped to one logit per
    choice.
    """

    def __init__(self, config):
        super().__init__(config)
        self.funnel = FunnelBaseModel(config)
        self.classifier = Classifier(config, 1)
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        # Flatten (batch, num_choices, ...) into (batch * num_choices, ...).
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = (
            attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        )
        token_type_ids = (
            token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        )
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        outputs = self.funnel(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        y = outputs[0]
        # Classify from the first ([CLS]) position of each flattened sequence.
        pooled_output = y[:, 0]
        logits = self.classifier(pooled_output)
        # Back to one logit per choice: (batch, num_choices).
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return qo.WithLoss(
            loss=loss,
            logits=reshaped_logits,
            hiddens=outputs.hiddens,
            attns=outputs.attns,
        )
class ForSeqClass(PreTrained):
    """Funnel model with a sequence-classification head (tanh pooling classifier)."""

    def __init__(self, **kw):
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        self.model = Model(**kw)
        self.proj = Classifier(cfg.d_model, "tanh", **kw)

    forward = qf.forward_seq
class ForTokClass(PreTrained):
    """Funnel model with a per-token classification head."""

    def __init__(self, **kw):
        super().__init__(**kw)
        self.get_cfg(kw)
        # NOTE(review): `Model` defined in this file takes a positional `config`;
        # this kw-style call presumably targets a compatible constructor — verify.
        self.model = Model(add_pool=False, **kw)
        self.proj = Classifier(**kw)

    forward = qf.forward_tok
class ForQA(PreTrained):
    """Funnel model with a span-prediction head for extractive question answering."""

    def __init__(self, **kw):
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        self.model = Model(**kw)
        # Projects to per-token span logits (n_labels is presumably 2: start/end — verify).
        self.proj = qc.Linear(cfg.d_model, cfg.n_labels, **kw)

    forward = qf.forward_qa
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,516
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/triton/common/__init__.py
|
from .build import _build
__all__ = ["_build"]
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,517
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/base/doc/qnn.py
|
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from .base import config
from .mboxes import Mboxes
from .log import Logger, start_stop_log
from .resource import resource
from .dispatch import Dispatch
# from .ptorch import TorchOne, TorchTwo
# from .tflow import Mnist
log = Logger(__name__)
class Qnn(Dispatch):
    """Dispatch command driving the qnn resource.

    Exposes three commands — ``setup``, ``learn``, and ``guess`` — each of
    which opens the context resource and exports the mboxes for this realm.
    ``setup`` is currently disabled (its body is commented out below).
    """

    # Destination path of the serialized qnn resource.
    _res_path = config.qnar_dst + 'qnn.qnr'
    _blog = 'blog'
    # Lazily-populated context; presumably resolved by Dispatch/resource —
    # TODO confirm against Dispatch.
    _ctxt = None

    @classmethod
    def globals(cls):
        # Dispatch resolves command names against this module's namespace.
        return globals()

    def setup(self, **kw):
        # TorchOne().loop()
        # TorchTwo().loop()
        # Mnist().loop()
        # NOTE(review): the intended body is parked in this docstring;
        # the method is currently a no-op.
        """
        with resource(self.ctxt) as ctxt:
            kw.update(ctxt=ctxt)
            with start_stop_log(log, 'Setting up Qnn'):
                dst = '/' + self.realm
                Mboxes(self.base).export_to(dst, **kw)
        """

    def learn(self, **kw):
        """Export the realm's mboxes as the learning step."""
        with resource(self.ctxt) as ctxt:
            kw.update(ctxt=ctxt)
            # Fix: message was 'Setting up Qnn', copy-pasted from setup();
            # logs could not distinguish learn from setup.
            with start_stop_log(log, 'Learning Qnn'):
                dst = '/' + self.realm
                Mboxes(self.base).export_to(dst, **kw)

    def guess(self, **kw):
        """Export the realm's mboxes as the guessing/inference step."""
        with resource(self.ctxt) as ctxt:
            kw.update(ctxt=ctxt)
            # Fix: message was 'Setting up Qnn', copy-pasted from setup().
            with start_stop_log(log, 'Guessing Qnn'):
                dst = '/' + self.realm
                Mboxes(self.base).export_to(dst, **kw)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,518
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/base/doc/util/roster.py
|
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
import shutil as sh
import filecmp as fc
import pathlib as pth
import collections as co
from hashlib import blake2b
from .log import Logger
from .base import config
from .counter import counters
from .resource import Resource, resource, Names
log = Logger(__name__)
def calc_digest(path, *, base=None, **_):
    """Return ``(hex_digest, size)`` for the file at `path`.

    The digest is a 20-byte blake2b computed by streaming the file in 64 KiB
    chunks, so arbitrarily large files are handled without loading them whole.

    Args:
        path: File path; relative to `base` when `base` is given.
        base: Optional directory the path is resolved against.
        **_: Ignored; lets callers pass a shared kwargs dict through.

    Returns:
        ``(hexdigest, size_in_bytes)``, or ``(None, None)`` when the file
        does not exist (a warning is logged in that case).
    """
    p = base / path if base else pth.Path(path)
    if p.exists():
        d, s = blake2b(digest_size=20), 0
        with open(p, 'rb') as f:
            # iter(callable, sentinel) yields chunks until read() returns b''.
            for b in iter(lambda: f.read(65536), b''):
                s += len(b)
                d.update(b)
        # Sanity check: bytes read must match the size reported by the OS.
        assert s == p.stat().st_size
        return d.hexdigest(), s
    # Fixed typo in the original message ("Cant't" -> "Can't").
    log.warning("Can't digest nonexistent file {}", p)
    return None, None
class Entry(co.namedtuple('Entry', 'path digest size')):
    """Immutable record of one tracked file: relative path, blake2b digest, size.

    Equality compares digest and size only (not path), so two copies of the
    same content at different paths compare equal.  Entries are deliberately
    unhashable because digests may be absent or recomputed.
    """
    __slots__ = ()

    def __new__(cls, path, digest=None, size=None, **kw):
        # Digest lazily when the caller did not supply one.
        if not digest:
            digest, size = calc_digest(path, **kw)
        return super().__new__(cls, path, digest, size)

    def __bool__(self):
        # Valid only when every field is populated.
        return bool(self.path) and self.digest is not None and self.size is not None

    __hash__ = None

    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return NotImplemented
        # Content identity: same digest and same size; path is irrelevant.
        return self.digest and self.digest == other.digest and self.size == other.size

    def __repr__(self):
        out = "{}({!r}".format(type(self).__name__, str(self.path))
        if self.digest:
            out += ", {!r}, {}".format(self.digest, self.size)
        return out + ")"

    def relative_to(self, path, base, **_):
        """Report whether this entry lies under `path` (both rooted at `base`)."""
        try:
            (base / self.path).relative_to(base / path)
            return True
        except ValueError:
            return False

    def check(self, **kw):
        """Re-digest the file on disk and report whether it still matches."""
        if not self.digest:
            log.info('No digest for {}', self.path)
            return False
        d, s = calc_digest(self.path, **kw)
        if d == self.digest and s == self.size:
            return True
        log.info('Mismatched digest for {}', self.path)
        return False
def prune_dir(path, cntr=None, **_):
    """Recursively delete hidden entries under `path`, then remove `path` itself
    if it ended up empty.

    Hidden directories (dot-prefixed) are removed wholesale; hidden files are
    unlinked unless they carry the '.qnr' resource suffix, which is preserved.
    Non-hidden subdirectories are pruned recursively.

    Args:
        path: pathlib.Path of the directory to prune.
        cntr: Optional counter; incremented with '-' per deletion, None otherwise.
        **_: Ignored; lets callers pass a shared kwargs dict through.
    """
    with os.scandir(path) as es:
        for e in es:
            p = pth.Path(e.path)
            j = None
            if p.name.startswith('.'):
                if e.is_dir(follow_symlinks=False):
                    sh.rmtree(str(p))
                elif p.suffix != '.qnr':
                    # Keep '.qnr' resource files; delete any other hidden file.
                    p.unlink()
                    log.info('Deleted {}', p)
                    j = '-'
            elif e.is_dir(follow_symlinks=False):
                prune_dir(p, cntr)
                continue
            if cntr:
                cntr.incr(j)
    try:
        path.rmdir()
        log.info('Deleted {}', path)
        j = '-'
    # Narrowed from a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit; rmdir on a non-empty/locked dir raises OSError.
    except OSError:
        j = None
    if cntr:
        cntr.incr(j)
class Roster(Resource):
    # Persistent catalog of files under a base directory, keyed two ways in the
    # underlying dict (Resource is dict-like): path -> (digest, size) and
    # (digest, size) -> Entry.  The dual mapping makes both "is this path
    # known?" and "is this content a duplicate?" O(1).

    # Default on-disk location of the serialized roster.
    _res_path = '.roster.qnr'

    @classmethod
    def globals(cls):
        # Expose this module's namespace so stored objects re-resolve by name.
        return globals()

    def __init__(self, entries=None, **kw):
        super().__init__(None, **kw)
        # Duplicate pairs (old_entry, new_entry) queued for expel().
        self._expels = []
        # Symlinks seen while scanning; recorded, never followed.
        self._symlinks = []
        if entries:
            self.add_entry(entries)

    def __repr__(self):
        return '{}({!r})'.format(type(self).__name__, tuple(self.entries))

    def __str__(self):
        s = '{}:'.format(str(self.base))
        for e in self.entries:
            s += '\n{} {} {}'.format(str(e.path), str(e.digest), e.size)
        return s

    @property
    def entries(self):
        # Only the Entry values (the (digest, size) -> Entry direction),
        # sorted by path for stable output.
        es = [e for e in self.values() if isinstance(e, Entry)]
        return sorted(es, key=lambda x: x.path)

    def adjust_kw(self, kw):
        # Normalize shared kwargs in place: ensure kw['base'] is a Path,
        # defaulting to this roster's own base.
        def _adjust(key, default):
            v = kw.get(key)
            v = pth.Path(v) if v else default
            kw[key] = v
        _adjust('base', self.base)

    def entry_adder(self, entry, cntr, modify=False, expel=True, **kw):
        # Generator: register entry (or a nested iterable of entries) and yield
        # the paths actually added.  `modify` is falsy or a counter key ('m')
        # enabling in-place digest updates; `expel` queues content duplicates.
        if isinstance(entry, Entry):
            assert entry
            p, d, s = entry
            k = d, s
            if p in self:
                # Path already known: compare stored (digest, size).
                ok = self[p]
                if k != ok:
                    if modify:
                        log.info('Modifying digest for {}', p)
                        # Drop the stale content-key mapping, then rebind both
                        # directions to the new digest.
                        del self[ok]
                        self[p] = k
                        self[k] = entry
                        cntr.incr(modify)
                        return
                    else:
                        log.warning('Digest mismatch for {}', p)
                cntr.incr()
            else:
                # New path: EAFP check whether the content is already known.
                try:
                    o = self[k]
                except KeyError:
                    self[p] = k
                    self[k] = entry
                    yield p
                else:
                    log.info('Duplicates: {} and {}', o.path, p)
                    if expel:
                        self._expels.append((o, entry))
                    cntr.incr()
        else:
            # Not an Entry: treat as an iterable of entries and recurse.
            for e in entry:
                yield from self.entry_adder(e, cntr, modify, expel, **kw)

    add_args = ((('scanned', '.'), ('added', '+')), 'Adding:')

    def add_entry(self, entry, **kw):
        # counters() injects the active counter into kw for entry_adder.
        with counters(self.add_args, kw) as cs:
            for _ in self.entry_adder(entry, **kw):
                cs.incr('+')
        return cs

    def path_adder(self, path, **kw):
        # Register one on-disk path, stored relative to the base.
        self.adjust_kw(kw)
        p = str(pth.Path(path).relative_to(kw['base']))
        yield from self.entry_adder(Entry(p, **kw), **kw)

    def walker(self, paths=(), **kw):
        # Yield entries, optionally restricted to those under any of `paths`.
        for e in self.entries:
            if paths:
                for p in paths:
                    if e.relative_to(p, **kw):
                        break
                else:
                    # for/else: no prefix matched, skip this entry.
                    continue
            yield e

    def scanner(self, root, cntr, **kw):
        # Walk the tree under `root` (skipping hidden entries and symlinks)
        # and feed every regular file through path_adder.
        def _paths(path):
            with os.scandir(path) as es:
                for e in es:
                    p = pth.Path(e.path)
                    if not p.name.startswith('.'):
                        if e.is_dir(follow_symlinks=False):
                            yield from _paths(p)
                            continue
                        elif e.is_file(follow_symlinks=False):
                            yield p
                            continue
                        elif e.is_symlink():
                            # Recorded for later inspection, never followed.
                            log.info('Symlink {}', p)
                            self._symlinks.append(p)
                    else:
                        log.info('Ignoring dir entry {}', p)
                    # Reached only for symlinks and ignored/hidden entries.
                    cntr.incr()
        if root.exists():
            for p in _paths(root):
                yield from self.path_adder(p, **kw, cntr=cntr)

    scan_args = ((('scanned', '.'), ('added', '+')), 'Scanning:')

    def scan(self, paths=(), **kw):
        # Scan base (or the given subpaths) and add every new file found.
        self.adjust_kw(kw)
        b = kw['base']
        with counters(self.scan_args, kw) as cs:
            for p in paths or ('', ):
                for _ in self.scanner(b / p, **kw):
                    cs.incr('+')
        return cs

    rescan_args = ((('scanned', '.'), ('added', '+'), ('removed', '-'),
                    ('modified', 'm')), 'Rescanning:')

    def rescanner(self, paths, cntr, **kw):
        # Drop entries whose files vanished, then re-scan with digest
        # modification enabled; yields newly added paths.
        self.adjust_kw(kw)
        b = kw['base']
        es = [e for e in self.walker(paths, **kw) if not (b / e.path).exists()]
        for p, d, s in es:
            # Remove both directions of the dual mapping.
            del self[p]
            del self[(d, s)]
            cntr.incr('-')
        # Stale duplicate queue is meaningless after removals.
        self._expels = []
        for p in paths or ('', ):
            for p in self.scanner(b / p, **kw, cntr=cntr, modify='m'):
                yield p

    def rescan(self, paths=(), **kw):
        with counters(self.rescan_args, kw) as cs:
            for _ in self.rescanner(paths, **kw):
                cs.incr('+')
        return cs

    check_args = ((('passed', '.'), ('failed', 'F')), 'Checking:')

    def check(self, paths=(), **kw):
        # Re-digest every tracked file and tally pass/fail.
        self.adjust_kw(kw)
        with counters(self.check_args, kw) as cs:
            for e in self.walker(paths, **kw):
                cs.incr('.' if e.check(**kw) else 'F')
        return cs

    def check_ok(self, paths=(), **kw):
        # True when no check failed.
        return not self.check(paths, **kw)['F']

    def rename_path(self, src, dst, cntr, cntr_key=None, **_):
        # Move/rename src to dst, creating parent dirs; refuses to clobber.
        if dst.exists():
            log.warning("Can't move/rename, destination exists {}", dst)
            cntr.incr('F')
        else:
            dst.parent.mkdir(parents=True, exist_ok=True)
            src.rename(dst)
            log.info('Moved/renamed {} to/as {}', src, dst)
            cntr.incr(cntr_key)

    expel_args = ((('scanned', '.'), ('expelled', 'e'), ('failed', 'F')),
                  'Expelling:')

    def expel(self, ebase=None, **kw):
        # Move queued duplicates out to `ebase` (default: sibling 'expel' dir)
        # after a byte-for-byte comparison confirms they really are duplicates.
        with counters(self.expel_args, kw) as cs:
            self.adjust_kw(kw)
            b = kw['base']
            for o, d in self._expels:
                op = b / o.path
                dp = b / d.path
                if fc.cmp(op, dp, shallow=False):
                    e = (ebase or (b.parent / 'expel')) / d.path
                    self.rename_path(dp, e, **kw, cntr_key='e')
                else:
                    log.error('Duplicates compare failed {}, {}', op, dp)
                    cs.incr('F')
            self._expels = []
        return cs

    def absorb_paths(self, paths=(), abase=None, **kw):
        # Yield (base, absorb_base, existing_absorb_path) triples for each
        # requested subpath that exists under the absorb dir.
        self.adjust_kw(kw)
        b = kw['base']
        ab = abase or (b.parent / 'absorb')
        for p in paths or ('', ):
            p = ab / p
            if p.exists():
                yield b, ab, p

    absorb_args = ((('scanned', '.'), ('absorbed', 'a'), ('failed', 'F')),
                   'Absorbing:')

    def absorb(self, paths=(), abase=None, **kw):
        # Pull unique files from the absorb dir into base; duplicates are
        # simply skipped (expel disabled), then the absorb dir is pruned.
        with counters(self.absorb_args, kw) as cs:
            kw['expel'] = False
            for b, ab, path in self.absorb_paths(paths, abase, **kw):
                # Materialize first: rename_path mutates the tree under scan.
                for p in [p for p in self.scanner(path, **kw, base=ab)]:
                    self.rename_path(ab / p, b / p, **kw, cntr_key='a')
                prune_dir(path)
        return cs

    prune_args = ((('scanned', '.'), ('deleted', '-')), 'Pruning:')

    def prune(self, paths=(), abase=None, **kw):
        # Delete leftover clutter under the absorb dir without absorbing.
        with counters(self.prune_args, kw) as cs:
            for _, ab, p in self.absorb_paths(paths, abase, **kw):
                prune_dir(p, **kw)
        return cs

    def namer(self, path, names, base, cntr, **_):
        # Record a normalized (lowercase, dash-for-space) name for `path` and,
        # recursively, for each of its parent directories.
        p = str(path)
        if p not in names:
            if (base / path).exists():
                names[p] = np = p.lower().replace(' ', '-')
                cntr.incr('.' if p == np else 'n')
                path = path.parent
                if path.name:
                    self.namer(path, names, base, cntr)
            else:
                cntr.incr('F')

    names_args = ((('scanned', '.'), ('renamed', 'r'), ('normalized', 'n'),
                   ('failed', 'F')), 'Naming:')

    def names(self, paths=(), **kw):
        # Rebuild the Names resource from scratch for the tracked entries.
        with counters(self.names_args, kw) as cs:
            self.adjust_kw(kw)
            with resource(Names.create(kw['base'])) as ns:
                ns.clear()
                for e in self.walker(paths, **kw):
                    self.namer(pth.Path(e.path), ns, **kw)
        return cs

    rename_args = ((('scanned', '.'), ('added', '+'), ('removed', '-'),
                    ('modified', 'm'), ('normalized', 'n'), ('renamed', 'r'),
                    ('failed', 'F')), 'Renaming:')

    def rename(self, paths=(), **kw):
        # Apply the Names mapping on disk: rename files first, then leftover
        # directories (deepest first), then rescan and rebuild the names.
        with counters(self.rename_args, kw) as cs:
            self.adjust_kw(kw)
            b = kw['base']
            with resource(Names.create(b)) as ns:
                if ns:
                    for e in self.walker(paths, **kw):
                        p = e.path
                        try:
                            d = b / ns.pop(p)
                        except KeyError:
                            cs.incr()
                            continue
                        self.rename_path(b / p, d, **kw, cntr_key='r')
                    ps = paths or ('', )
                    # Deepest paths first so child renames precede parents.
                    for o in sorted(ns.keys(), reverse=True):
                        d = b / ns.pop(o)
                        o = b / o
                        if o.exists() and o.is_dir():
                            for p in ps:
                                try:
                                    o.relative_to(b / p)
                                    break
                                except ValueError:
                                    continue
                            else:
                                # Not under any requested subpath.
                                cs.incr()
                                continue
                            self.rename_path(o, d, **kw, cntr_key='r')
                        else:
                            cs.incr()
                    # Refresh entries and re-derive names for the new layout.
                    for p in self.rescanner(paths, **kw):
                        self.namer(pth.Path(p), ns, **kw)
        return cs
if __name__ == '__main__':
    from .args import BArgs

    # Command-line front end: build the parser data-driven, then dispatch the
    # requested roster operation(s) and persist the result.
    parser = BArgs()
    parser.add_argument('paths', nargs='*', help='Paths to follow')
    for short, full, toggle, text in (
            ('-u', '--prune', True, 'Prune absorb dir'),
            ('-a', '--absorb', False, 'Path to absorb uniques from'),
            ('-x', '--rename', True, 'Rename files'),
            ('-R', '--rescan', True, 'Rescan base'),
            ('-s', '--scan', True, 'Scan base'),
            ('-e', '--expel', False, 'Path to expel duplicates to'),
            ('-c', '--check', True, 'Check all digests'),
            ('-n', '--names', True, 'Names of files'),
    ):
        if toggle:
            parser.add_argument(short, full, action=parser.st, help=text)
        else:
            parser.add_argument(short, full, help=text)
    args = parser.parse_args()
    roster = Roster.create(args.base)
    if args.prune:
        ab = None if args.absorb is None or args.absorb == config.DEFAULT else args.absorb
        roster.prune(args.paths, abase=ab)
    elif args.absorb:
        ab = args.absorb if args.absorb != config.DEFAULT else None
        roster.absorb(args.paths, abase=ab)
    elif args.rename:
        roster.rename(args.paths)
    else:
        # Scan/rescan, then any combination of expel/check/names.
        if args.rescan:
            roster.rescan(args.paths)
        elif args.scan:
            roster.scan(args.paths)
        if args.expel:
            roster.expel(ebase=None if args.expel == config.DEFAULT else args.expel)
        if args.check:
            roster.check_ok(args.paths)
        if args.names:
            roster.names(args.paths)
    roster.save()
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,519
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/base/doc/report.py
|
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import csv
import pathlib as pth
from qnarre import load_from
class Report:
fields = (
'Activism',
'Agency',
'Author',
'Coherence',
'Credibility',
'Date',
'Fragment',
'Genre',
'Judgment',
'Kind',
'Loss',
'Name',
'Narrative',
'Page',
'Para',
'Reality',
'Source',
'Text',
'Title',
'Topic',
'Turmoil',
'Type',
)
exclude = ()
def __init__(self, dst):
self.csv = csv.DictWriter(dst, self.fields)
self.csv.writeheader()
def write(self, node):
if node.__class__ not in self.exclude:
ls = node.fields
if isinstance(ls, list):
for fs in ls:
self.csv.writerow(fs)
else:
self.csv.writerow(ls)
def report(root, **kw):
    """Load the merged graph under *root* and dump every node to merged.csv.

    The extra keyword arguments are forwarded (with root= injected) to
    load_from; progress is traced on stdout.
    """
    kw.update(root=root)
    print('Loading from {}...'.format(str(root)))
    nodes = set(load_from(pth.Path('merged.org'), **kw).net.nodes())
    for node in nodes:
        print(node)
    print('...done')
    print('Reporting...')
    # newline='' is required so csv controls line endings itself.
    with open(root / 'merged.csv', 'w', newline='') as dst:
        rep = Report(dst)
        for node in nodes:
            rep.write(node)
    print('...done')
if __name__ == '__main__':
    from argparse import ArgumentParser

    # Separate names for the parser and the parsed namespace for clarity.
    parser = ArgumentParser()
    parser.add_argument('-r', '--root', help='Path to root', default=None)
    opts = parser.parse_args()
    report(pth.Path.cwd() / (opts.root or 'sample'))
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,520
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/models/plbart.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import copy
import math
import random
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import functional as F
from transformers.utils import logging
from .. import core as qc
from ..core import utils as qu
from ..core import output as qo
from ..core import attention as qa
from ..core.embed import Embed
from ..core.mlp import Classifier, MLP, Predictor, Pool
from ..prep.config.bert import PreTrained
from torch.nn import CrossEntropyLoss
log = logging.get_logger(__name__)
LIST = [
"uclanlp/plbart-base",
"uclanlp/plbart-cs-java",
"uclanlp/plbart-multi_task-all",
]
# Copied from transformers.models.mbart.modeling_mbart.shift_tokens_right
def shift_tokens_right(input_ids, PAD):
    """Rotate *input_ids* one position to the right, seeding column 0 with each
    row's final non-pad (EOS) token — the mBART-style decoder-input shift.

    Any -100 label placeholders are first replaced by *PAD*.
    Raises ValueError when *PAD* is None.
    """
    if PAD is None:
        raise ValueError("self.model.config.PAD has to be defined.")
    shifted = input_ids.clone()
    # replace possible -100 values in labels by `PAD`
    shifted.masked_fill_(shifted == -100, PAD)
    # Index of the last non-pad token per row, kept 2-d for gather().
    eos_idx = (shifted.ne(PAD).sum(dim=1) - 1).unsqueeze(-1)
    start_tokens = shifted.gather(1, eos_idx).squeeze()
    shifted[:, 1:] = shifted[:, :-1].clone()
    shifted[:, 0] = start_tokens
    return shifted
# Copied from transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding with Bart->PLBart
class PLBartLearnedPositionalEmbedding(qc.Embed):
    """Learned positional embedding with the Bart-style offset of 2.

    The first two embedding rows are reserved (historic Bart padding
    convention), so the table is allocated with two extra slots and every
    position index is shifted by the offset before lookup.
    """

    def __init__(self, num_embeddings, embedding_dim):
        self.offset = 2
        super().__init__(num_embeddings + self.offset, embedding_dim)

    def forward(self, input_ids_shape, past_key_values_length=0):
        """`input_ids_shape` is expected to be [bsz x seqlen]."""
        _, seq_len = input_ids_shape[:2]
        # Positions continue from the cached prefix during incremental decoding.
        idx = torch.arange(
            past_key_values_length,
            past_key_values_length + seq_len,
            dtype=torch.long,
            device=self.weight.device,
        )
        return super().forward(idx + self.offset)
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->PLBart
class Attention(qc.Module):
    """Multi-headed scaled dot-product attention (Bart-style).

    Handles three modes, selected by its forward() arguments:
      * self-attention        (key_value_states is None)
      * cross-attention       (key_value_states holds encoder states)
      * incremental decoding  (past_key_value caches earlier keys/values)
    """

    def __init__(
        self,
        embed_dim,
        n_heads,
        drop: float = 0.0,
        is_decoder=False,
        bias=True,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.n_heads = n_heads
        self.drop = drop
        self.head_dim = embed_dim // n_heads
        if (self.head_dim * n_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by n_heads (got `embed_dim`: {self.embed_dim}"
                f" and `n_heads`: {n_heads})."
            )
        # 1/sqrt(head_dim) scaling, applied to the query projection.
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.k_proj = qc.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = qc.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = qc.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = qc.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor, seq_len, bsz):
        # (bsz, seq, embed) -> (bsz, n_heads, seq, head_dim)
        return tensor.view(bsz, seq_len, self.n_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hiddens,
        key_value_states=None,
        past_key_value=None,
        attention_mask=None,
        layer_head_mask=None,
        output_attentions=False,
    ):
        """Return (attn_output, attn_weights_or_None, present_key_value_or_None)."""
        # Cross-attention when key/value come from the encoder states.
        is_cross_attention = key_value_states is not None
        bsz, tgt_len, _ = hiddens.size()
        # get query proj
        query_states = self.q_proj(hiddens) * self.scaling
        # get key, value proj
        if is_cross_attention and past_key_value is not None:
            # reuse cached cross-attention k,v — the encoder states never change
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # fresh cross-attention k,v from the encoder states
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # incremental self-attention: append new k,v to the cached prefix
            key_states = self._shape(self.k_proj(hiddens), -1, bsz)
            value_states = self._shape(self.v_proj(hiddens), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # plain self-attention
            key_states = self._shape(self.k_proj(hiddens), -1, bsz)
            value_states = self._shape(self.v_proj(hiddens), -1, bsz)
        if self.is_decoder:
            # cache k,v for the next decoding step
            past_key_value = (key_states, value_states)
        proj_shape = (bsz * self.n_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)
        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
        if attn_weights.size() != (bsz * self.n_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.n_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
            )
        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.n_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.n_heads, tgt_len, src_len)
        attn_weights = F.softmax(attn_weights, dim=-1)
        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.n_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.n_heads,)}, but is {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(
                bsz, self.n_heads, tgt_len, src_len
            )
            attn_weights = attn_weights.view(bsz * self.n_heads, tgt_len, src_len)
        if output_attentions:
            # keep a 4-d view for the caller; re-viewing preserves the autograd link
            attn_weights_reshaped = attn_weights.view(bsz, self.n_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.n_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None
        # FIX: torch.nn.functional has no `drop` attribute — F.drop would raise
        # AttributeError at runtime; the dropout API is F.dropout.
        attn_probs = F.dropout(attn_weights, p=self.drop, training=self.training)
        attn_output = torch.bmm(attn_probs, value_states)
        if attn_output.size() != (bsz * self.n_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.n_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
            )
        attn_output = attn_output.view(bsz, self.n_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
        attn_output = self.out_proj(attn_output)
        return attn_output, attn_weights_reshaped, past_key_value
# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->PLBart
class EncLayer(qc.Module):
    """Single PLBart encoder layer: self-attention then feed-forward, each with
    dropout, residual connection and post-LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = Attention(
            embed_dim=self.embed_dim,
            n_heads=config.encoder_attention_heads,
            drop=config.drop_attn,
        )
        self.self_attn_layer_norm = qc.LayerNorm(self.embed_dim)
        self.drop = config.drop
        self.activation_fn = qu.activation(config.act)
        self.drop_act = config.drop_act
        self.fc1 = qc.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = qc.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = qc.LayerNorm(self.embed_dim)

    def forward(
        self,
        hiddens,
        attention_mask,
        layer_head_mask,
        output_attentions=False,
    ):
        """Return (hiddens,) or (hiddens, attn_weights) when output_attentions."""
        residual = hiddens
        hiddens, attn_weights, _ = self.self_attn(
            hiddens=hiddens,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        # FIX: torch.nn.functional has no `drop`; the dropout API is F.dropout
        # (applies to all three dropout sites in this method).
        hiddens = F.dropout(hiddens, p=self.drop, training=self.training)
        hiddens = residual + hiddens
        hiddens = self.self_attn_layer_norm(hiddens)
        residual = hiddens
        hiddens = self.activation_fn(self.fc1(hiddens))
        hiddens = F.dropout(hiddens, p=self.drop_act, training=self.training)
        hiddens = self.fc2(hiddens)
        hiddens = F.dropout(hiddens, p=self.drop, training=self.training)
        hiddens = residual + hiddens
        hiddens = self.final_layer_norm(hiddens)
        # Guard against fp16 overflow introduced by the residual sums.
        if hiddens.dtype == torch.float16 and (
            torch.isinf(hiddens).any() or torch.isnan(hiddens).any()
        ):
            clamp_value = torch.finfo(hiddens.dtype).max - 1000
            hiddens = torch.clamp(hiddens, min=-clamp_value, max=clamp_value)
        outputs = (hiddens,)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs
# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->PLBart
class DecLayer(qc.Module):
    """Single PLBart decoder layer: masked self-attention, optional cross-attention
    over encoder states, then feed-forward — each with dropout, residual and
    post-LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = Attention(
            embed_dim=self.embed_dim,
            n_heads=config.decoder_attention_heads,
            drop=config.drop_attn,
            is_decoder=True,
        )
        self.drop = config.drop
        self.activation_fn = qu.activation(config.act)
        self.drop_act = config.drop_act
        self.self_attn_layer_norm = qc.LayerNorm(self.embed_dim)
        self.encoder_attn = Attention(
            self.embed_dim,
            config.decoder_attention_heads,
            drop=config.drop_attn,
            is_decoder=True,
        )
        self.encoder_attn_layer_norm = qc.LayerNorm(self.embed_dim)
        self.fc1 = qc.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = qc.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = qc.LayerNorm(self.embed_dim)

    def forward(
        self,
        hiddens,
        attention_mask=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        layer_head_mask=None,
        cross_attn_layer_head_mask=None,
        past_key_value=None,
        output_attentions=False,
        y_cache=True,
    ):
        """Return (hiddens[, self_attn_weights, cross_attn_weights][, present_key_value]).

        `past_key_value` layout: positions 0,1 are the self-attention cache,
        positions 2,3 (when present) the cross-attention cache.
        """
        residual = hiddens
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hiddens, self_attn_weights, present_key_value = self.self_attn(
            hiddens=hiddens,
            past_key_value=self_attn_past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        # FIX: torch.nn.functional has no `drop`; the dropout API is F.dropout
        # (applies to all four dropout sites in this method).
        hiddens = F.dropout(hiddens, p=self.drop, training=self.training)
        hiddens = residual + hiddens
        hiddens = self.self_attn_layer_norm(hiddens)
        # Cross-Attention Block
        cross_attn_present_key_value = None
        cross_attn_weights = None
        if enc_hiddens is not None:
            residual = hiddens
            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            hiddens, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
                hiddens=hiddens,
                key_value_states=enc_hiddens,
                attention_mask=encoder_attention_mask,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
                output_attentions=output_attentions,
            )
            hiddens = F.dropout(hiddens, p=self.drop, training=self.training)
            hiddens = residual + hiddens
            hiddens = self.encoder_attn_layer_norm(hiddens)
            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value
        # Fully Connected
        residual = hiddens
        hiddens = self.activation_fn(self.fc1(hiddens))
        hiddens = F.dropout(hiddens, p=self.drop_act, training=self.training)
        hiddens = self.fc2(hiddens)
        hiddens = F.dropout(hiddens, p=self.drop, training=self.training)
        hiddens = residual + hiddens
        hiddens = self.final_layer_norm(hiddens)
        outputs = (hiddens,)
        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)
        if y_cache:
            outputs += (present_key_value,)
        return outputs
class Encoder(PreTrained):
    # Transformer encoder stack: shared/owned token embeddings plus learned
    # positional embeddings, a post-embedding LayerNorm, then
    # config.encoder_layers EncLayer blocks.
    def __init__(self, config, embed_tokens=None):
        """Build the encoder.

        config: model hyperparameters (d_model, encoder_layers, drop, ...).
        embed_tokens: optional embedding module to share (e.g. Model.shared);
            when None a fresh qc.Embed is created.
        """
        super().__init__(config)
        self.drop = config.drop
        self.layerdrop = config.encoder_layerdrop
        embed_dim = config.d_model
        self.padding_idx = config.PAD
        self.max_source_positions = config.n_pos
        # Scale embeddings by sqrt(d_model) when configured (original-Transformer style).
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
        if embed_tokens is not None:
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = qc.Embed(config.s_vocab, embed_dim, self.padding_idx)
        self.embed_positions = PLBartLearnedPositionalEmbedding(
            config.n_pos,
            embed_dim,
        )
        self.layers = nn.ModuleList([EncLayer(config) for _ in range(config.encoder_layers)])
        self.layernorm_embedding = qc.LayerNorm(embed_dim)
        self.gradient_checkpointing = False
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Encode input_ids (or precomputed inputs_embeds).

        Exactly one of input_ids / inputs_embeds must be provided.
        Returns either a tuple (hiddens[, states][, attns]) or qo.Base,
        depending on return_dict.
        """
        # Fall back to config-level defaults for unset flags.
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        embed_pos = self.embed_positions(input_shape)
        hiddens = inputs_embeds + embed_pos
        hiddens = self.layernorm_embedding(hiddens)
        hiddens = F.drop(hiddens, p=self.drop, training=self.training)
        # expand attention_mask from (batch, seq) to the broadcastable attention shape
        if attention_mask is not None:
            attention_mask = qu.expand_mask(attention_mask, inputs_embeds.dtype)
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            if head_mask.size()[0] != (len(self.layers)):
                raise ValueError(
                    f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
                )
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hiddens,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            # NOTE: one RNG draw per layer per forward pass — reordering would
            # change results under a fixed seed.
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):  # skip the layer
                layer_outputs = (None, None)
            else:
                if self.gradient_checkpointing and self.training:
                    # Re-run the layer in backward instead of storing activations.
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)
                        return custom_forward
                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(encoder_layer),
                        hiddens,
                        attention_mask,
                        (head_mask[idx] if head_mask is not None else None),
                    )
                else:
                    layer_outputs = encoder_layer(
                        hiddens,
                        attention_mask,
                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                        output_attentions=output_attentions,
                    )
                hiddens = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        if output_hidden_states:
            encoder_states = encoder_states + (hiddens,)
        if not return_dict:
            return tuple(v for v in [hiddens, encoder_states, all_attentions] if v is not None)
        return qo.Base(y=hiddens, hiddens=encoder_states, attns=all_attentions)
# Copied from transformers.models.bart.modeling_bart.BartDecoder with Bart->PLBart
class Decoder(PreTrained):
    # Transformer decoder stack: token + learned positional embeddings, a
    # post-embedding LayerNorm, then config.decoder_layers DecLayer blocks with
    # optional cross-attention into encoder hidden states and an incremental
    # key/value cache for generation.
    def __init__(self, config, embed_tokens=None):
        """Build the decoder.

        config: model hyperparameters (d_model, decoder_layers, drop, ...).
        embed_tokens: optional embedding module to share (e.g. Model.shared);
            when None a fresh qc.Embed is created.
        """
        super().__init__(config)
        self.drop = config.drop
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = config.PAD
        self.max_target_positions = config.n_pos
        # Scale embeddings by sqrt(d_model) when configured (original-Transformer style).
        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
        if embed_tokens is not None:
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = qc.Embed(config.s_vocab, config.d_model, self.padding_idx)
        self.embed_positions = PLBartLearnedPositionalEmbedding(
            config.n_pos,
            config.d_model,
        )
        self.layers = nn.ModuleList([DecLayer(config) for _ in range(config.decoder_layers)])
        self.layernorm_embedding = qc.LayerNorm(config.d_model)
        self.gradient_checkpointing = False
    def _prepare_decoder_attention_mask(
        self, attention_mask, input_shape, inputs_embeds, past_key_values_length
    ):
        """Combine the causal mask (when decoding >1 step) with the padding mask."""
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = qu.causal_mask(
                input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
            ).to(self.device)
        if attention_mask is not None:
            expanded_attn_mask = qu.expand_mask(
                attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
            )
            combined_attention_mask = (
                expanded_attn_mask
                if combined_attention_mask is None
                else expanded_attn_mask + combined_attention_mask
            )
        return combined_attention_mask
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        head_mask=None,
        cross_attn_head_mask=None,
        caches=None,
        inputs_embeds=None,
        y_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Decode input_ids (or inputs_embeds), optionally attending to enc_hiddens.

        Exactly one of input_ids / inputs_embeds must be provided.
        Returns a tuple or qo.CachesCrosses depending on return_dict.
        """
        # Fall back to config-level defaults for unset flags.
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        y_cache = y_cache if y_cache is not None else self.config.y_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time"
            )
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError(
                "You have to specify either decoder_input_ids or decoder_inputs_embeds"
            )
        # past_key_values_length
        past_key_values_length = caches[0][0].shape[2] if caches is not None else 0
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        attention_mask = self._prepare_decoder_attention_mask(
            attention_mask, input_shape, inputs_embeds, past_key_values_length
        )
        # expand encoder attention mask
        if enc_hiddens is not None and encoder_attention_mask is not None:
            encoder_attention_mask = qu.expand_mask(
                encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
            )
        # embed positions
        positions = self.embed_positions(input_shape, past_key_values_length)
        hiddens = inputs_embeds + positions
        hiddens = self.layernorm_embedding(hiddens)
        hiddens = F.drop(hiddens, p=self.drop, training=self.training)
        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and enc_hiddens is not None) else None
        next_decoder_cache = () if y_cache else None
        # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
        for attn_mask, mask_name in zip(
            [head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]
        ):
            if attn_mask is not None:
                if attn_mask.size()[0] != (len(self.layers)):
                    # FIX: the message was not an f-string (it printed literal
                    # "{mask_name}") and reported head_mask's size even when
                    # cross_attn_head_mask was the offending tensor.
                    raise ValueError(
                        f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {attn_mask.size()[0]}."
                    )
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hiddens,)
            # NOTE: one RNG draw per layer per forward pass — reordering would
            # change results under a fixed seed.
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):
                continue
            past_key_value = caches[idx] if caches is not None else None
            if self.gradient_checkpointing and self.training:
                if y_cache:
                    log.warning(
                        "`y_cache=True` is incompatible with gradient checkpointing. Setting `y_cache=False`..."
                    )
                    y_cache = False
                # Re-run the layer in backward instead of storing activations.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, output_attentions, y_cache)
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hiddens,
                    attention_mask,
                    enc_hiddens,
                    encoder_attention_mask,
                    head_mask[idx] if head_mask is not None else None,
                    cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hiddens,
                    attention_mask=attention_mask,
                    enc_hiddens=enc_hiddens,
                    encoder_attention_mask=encoder_attention_mask,
                    layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                    cross_attn_layer_head_mask=(
                        cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
                    ),
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    y_cache=y_cache,
                )
            hiddens = layer_outputs[0]
            if y_cache:
                # present_key_value sits after the attention tensors when those are returned
                next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
            if output_attentions:
                all_self_attns += (layer_outputs[1],)
                if enc_hiddens is not None:
                    all_cross_attentions += (layer_outputs[2],)
        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hiddens,)
        next_cache = next_decoder_cache if y_cache else None
        if not return_dict:
            return tuple(
                v
                for v in [
                    hiddens,
                    next_cache,
                    all_hidden_states,
                    all_self_attns,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return qo.CachesCrosses(
            y=hiddens,
            caches=next_cache,
            hiddens=all_hidden_states,
            attns=all_self_attns,
            crosses=all_cross_attentions,
        )
class Model(PreTrained):
    """Full encoder-decoder transformer sharing one input embedding matrix."""
    def __init__(self, config):
        super().__init__(config)
        # A single embedding table serves both encoder and decoder.
        self.shared = qc.Embed(config.s_vocab, config.d_model, config.PAD)
        self.encoder = Encoder(config, self.shared)
        self.decoder = Decoder(config, self.shared)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        encoder_outputs=None,
        caches=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        y_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Run encoder (unless encoder_outputs is supplied) then decoder."""
        cfg = self.config
        # Fill in config-level defaults for unset flags.
        if output_attentions is None:
            output_attentions = cfg.output_attentions
        if output_hidden_states is None:
            output_hidden_states = cfg.output_hidden_states
        if y_cache is None:
            y_cache = cfg.y_cache
        if return_dict is None:
            return_dict = cfg.use_return_dict
        # Unlike most models, the decoder inputs default to a shifted copy of
        # input_ids when neither decoder ids nor decoder embeddings are given.
        if decoder_input_ids is None and decoder_inputs_embeds is None:
            decoder_input_ids = shift_tokens_right(input_ids, cfg.PAD)
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            # Caller handed us a plain tuple: re-wrap so attribute access works.
            encoder_outputs = BaseModelOutput(
                y=encoder_outputs[0],
                hiddens=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attns=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )
        dec_ys = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            enc_hiddens=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            caches=caches,
            inputs_embeds=decoder_inputs_embeds,
            y_cache=y_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if not return_dict:
            return dec_ys + encoder_outputs
        return Seq2SeqModelOutput(
            y=dec_ys.y,
            caches=dec_ys.caches,
            hiddens=dec_ys.hiddens,
            attns=dec_ys.attns,
            crosses=dec_ys.crosses,
            enc_y=encoder_outputs.y,
            enc_hiddens=encoder_outputs.hiddens,
            enc_attns=encoder_outputs.attns,
        )
class ForCondGen(PreTrained):
    """Seq2seq LM head: Model plus a vocabulary projection and a logit-bias buffer."""
    def __init__(self, config):
        super().__init__(config)
        self.model = Model(config)
        n_embed = self.model.shared.num_embeddings
        # Non-trainable additive bias applied to the LM logits.
        self.register_buffer("final_logits_bias", torch.zeros((1, n_embed)))
        self.lm_head = qc.Linear(config.d_model, n_embed, bias=False)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        encoder_outputs=None,
        caches=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        labels=None,
        y_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Run the seq2seq model and project to vocabulary logits; with labels,
        also compute the cross-entropy loss."""
        if return_dict is None:
            return_dict = self.config.use_return_dict
        # With labels but no explicit decoder inputs, teacher-force on shifted labels.
        if labels is not None and decoder_input_ids is None:
            decoder_input_ids = shift_tokens_right(labels, self.config.PAD)
        ys = self.model(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            caches=caches,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            y_cache=y_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = self.lm_head(ys[0]) + self.final_logits_bias
        loss = None
        if labels is not None:
            loss = CrossEntropyLoss()(logits.view(-1, self.config.s_vocab), labels.view(-1))
        if not return_dict:
            out = (logits,) + ys[1:]
            return out if loss is None else (loss,) + out
        return Seq2SeqLMOutput(
            loss=loss,
            logits=logits,
            caches=ys.caches,
            hiddens=ys.hiddens,
            attns=ys.attns,
            crosses=ys.crosses,
            enc_y=ys.enc_y,
            enc_hiddens=ys.enc_hiddens,
            enc_attns=ys.enc_attns,
        )
class ForSeqClass(PreTrained):
    """Sequence classification: the seq2seq Model with a tanh Classifier head,
    pooled at the final EOS position of each sequence."""
    def __init__(self, **kw):
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        self.model = Model(**kw)
        self.proj = Classifier(cfg.d_model, "tanh", **kw)
    forward = qf.forward_seq
    def pre_proj(self, x, ys):
        # Pool: take the decoder hidden state at the last EOS token per row.
        hidden = ys[0]
        is_eos = x.eq(self.cfg.EOS)
        # Every row must carry the same number of EOS tokens for the view below.
        assert len(torch.unique_consecutive(is_eos.sum(1))) <= 1
        pooled = hidden[is_eos, :].view(hidden.size(0), -1, hidden.size(-1))
        return pooled[:, -1, :]
# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->PLBart
class PLBartDecoderWrapper(PreTrained):
    """Thin shell holding a lone Decoder so ForCausal can reuse the standard
    seq2seq weight layout (`model.decoder.*`)."""
    def __init__(self, config):
        super().__init__(config)
        self.decoder = Decoder(config)
    def forward(self, *args, **kw):
        # Pure pass-through: all arguments go straight to the wrapped decoder.
        return self.decoder(*args, **kw)
# Copied from transformers.models.bart.modeling_bart.ForCausal with Bart->PLBart, facebook/bart-base->uclanlp/plbart-base
class ForCausal(PreTrained):
    """Standalone causal LM built from the decoder only, plus a vocab projection."""
    def __init__(self, config):
        # Work on a private copy so the caller's config object is untouched.
        cfg = copy.deepcopy(config)
        cfg.is_decoder = True
        cfg.is_enc_dec = False
        super().__init__(cfg)
        self.model = PLBartDecoderWrapper(cfg)
        self.lm_head = qc.Linear(cfg.d_model, cfg.s_vocab, bias=False)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        head_mask=None,
        cross_attn_head_mask=None,
        caches=None,
        inputs_embeds=None,
        labels=None,
        y_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Decode and project to logits; with labels also return cross-entropy loss."""
        cfg = self.config
        # Fill in config-level defaults for unset flags.
        if output_attentions is None:
            output_attentions = cfg.output_attentions
        if output_hidden_states is None:
            output_hidden_states = cfg.output_hidden_states
        if return_dict is None:
            return_dict = cfg.use_return_dict
        dec_ys = self.model.decoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            enc_hiddens=enc_hiddens,
            encoder_attention_mask=encoder_attention_mask,
            head_mask=head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            caches=caches,
            inputs_embeds=inputs_embeds,
            y_cache=y_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = self.lm_head(dec_ys[0])
        loss = None
        if labels is not None:
            loss = CrossEntropyLoss()(logits.view(-1, cfg.s_vocab), labels.view(-1))
        if not return_dict:
            out = (logits,) + dec_ys[1:]
            return out if loss is None else (loss,) + out
        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=logits,
            caches=dec_ys.caches,
            hiddens=dec_ys.hiddens,
            attns=dec_ys.attns,
            crosses=dec_ys.crosses,
        )
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,521
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/triton/debugger/torch_wrapper.py
|
try:
    import torch as _torch
except ImportError:
    _torch = None


class TorchWrapper:
    """Lazy proxy that makes ``torch`` an optional dependency.

    Attribute lookups are forwarded to the real ``torch`` module when it is
    installed; otherwise any access raises ``ImportError``.
    """

    def __getattr__(self, name):
        if _torch is not None:
            return getattr(_torch, name)
        raise ImportError("Triton requires PyTorch to be installed")


# Module-level stand-in used in place of a direct `import torch`.
torch = TorchWrapper()
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,522
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/models/reformer.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import numpy as np
import sys
import torch
import torch.utils.checkpoint
from collections import namedtuple
from functools import reduce
from operator import mul
from torch import nn
from torch.nn import functional as F
from transformers.utils import logging
from .. import core as qc
from ..core import utils as qu
from ..core import output as qo
from ..core import forward as qf
from ..core import attention as qa
from ..core.embed import Embed
from ..core.mlp import Classifier, MLP, Predictor, Pool
from ..prep.config.bert import PreTrained
from torch.autograd.function import Function
from torch.nn import CrossEntropyLoss
from ...pytorch_utils import apply_chunking_to_forward
log = logging.get_logger(__name__)
# Reference checkpoints for this architecture.
LIST = [
    "google/reformer-crime-and-punishment",
    "google/reformer-enwik8",
]

# Lightweight result containers passed between the attention layers,
# the reversible-residual machinery, and the encoder below.
LSHSelfAttentionOutput = namedtuple("LSHSelfAttentionOutput", ["hiddens", "attention_probs", "buckets"])
LocalSelfAttentionOutput = namedtuple("LocalSelfAttentionOutput", ["hiddens", "attention_probs"])
AttentionOutput = namedtuple("AttentionOutput", ["hiddens", "attention_probs", "buckets"])
ReformerOutput = namedtuple("ReformerOutput", ["hiddens", "attn_output", "attention_probs", "buckets"])
ReformerBackwardOutput = namedtuple(
    "ReformerBackwardOutput", ["attn_output", "hiddens", "grad_attn_output", "grad_model_states"]
)
ReformerEncoderOutput = namedtuple(
    "ReformerEncoderOutput", ["hiddens", "all_hidden_states", "all_attentions", "caches"]
)
def _stable_argsort(vector, dim):
scale_offset = torch.arange(vector.shape[dim], device=vector.device).view(1, 1, -1)
scale_offset = scale_offset.expand(vector.shape)
scaled_vector = vector.shape[dim] * vector + (scale_offset % vector.shape[dim])
return torch.argsort(scaled_vector, dim=dim)
def _get_least_common_mult_chunk_len(config):
attn_types = config.attn_layers
attn_types_set = set(attn_types)
if len(attn_types_set) == 1 and attn_types[0] == "lsh":
return config.lsh_attn_chunk_length
elif len(attn_types_set) == 1 and attn_types[0] == "local":
return config.local_attn_chunk_length
elif len(attn_types_set) == 2 and attn_types_set == set(["lsh", "local"]):
return np.lcm(config.lsh_attn_chunk_length, config.local_attn_chunk_length)
else:
raise NotImplementedError(
f"Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {config.attn_layers}. Select "
"attn layer types from ['lsh', 'local'] only."
)
def _get_min_chunk_len(config):
attn_types = config.attn_layers
attn_types_set = set(attn_types)
if len(attn_types_set) == 1 and attn_types[0] == "lsh":
return config.lsh_attn_chunk_length
elif len(attn_types_set) == 1 and attn_types[0] == "local":
return config.local_attn_chunk_length
elif len(attn_types_set) == 2 and attn_types_set == set(["lsh", "local"]):
return min(config.lsh_attn_chunk_length, config.local_attn_chunk_length)
else:
raise NotImplementedError(
f"Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {config.attn_layers}. Select "
"attn layer types from ['lsh', 'local'] only."
)
class AxialPositionEmbeddings(qc.Module):
    # Factorized ("axial") position embeddings: the sequence axis is viewed as
    # a grid of shape `axial_pos_shape`, and each grid axis has its own small
    # parameter that is broadcast over the other axes; the per-axis features
    # are concatenated on the last dimension to give `d_model` per position.
    def __init__(self, config):
        super().__init__()
        self.axial_pos_shape = config.axial_pos_shape
        self.axial_pos_embds_dim = config.axial_pos_embds_dim
        self.drop = config.drop
        self.least_common_mult_chunk_length = _get_least_common_mult_chunk_len(config)
        self.weights = nn.ParameterList()
        # The per-axis feature sizes are concatenated, so they must sum to the
        # model dimension exactly.
        if sum(self.axial_pos_embds_dim) != config.d_model:
            raise ValueError(
                f"Make sure that config.axial_pos_embds factors: {self.axial_pos_embds_dim} sum to "
                f"config.d_model: {config.d_model}"
            )
        # One broadcastable parameter per grid axis, e.g. for axial_pos_shape
        # (a, b) the weights have shapes (a, 1, d0) and (1, b, d1).
        for axis, axial_pos_embd_dim in enumerate(self.axial_pos_embds_dim):
            ax_shape = [1] * len(self.axial_pos_shape)
            ax_shape[axis] = self.axial_pos_shape[axis]
            ax_shape = tuple(ax_shape) + (axial_pos_embd_dim,)
            self.weights.append(nn.Parameter(torch.ones(ax_shape, dtype=torch.float32)))
    def forward(self, position_ids):
        # position_ids: integer tensor; indexing below implies shape
        # (batch, seq_len) — assumed, confirm against callers.
        batch_size = position_ids.shape[0]
        sequence_length = position_ids.shape[1]
        # Broadcast each per-axis weight over the full grid:
        # (batch,) + axial_pos_shape + (axis_feature_dim,).
        broadcasted_weights = [
            weight.expand((batch_size,) + self.axial_pos_shape + weight.shape[-1:])
            for weight in self.weights
        ]
        if self.training is True:
            # Training requires the sequence to fill the position grid exactly
            # so the flat reshape below is valid.
            if reduce(mul, self.axial_pos_shape) != sequence_length:
                raise ValueError(
                    f"If training, make sure that config.axial_pos_shape factors: {self.axial_pos_shape} multiply to "
                    f"sequence length. Got prod({self.axial_pos_shape}) != sequence_length: {sequence_length}. "
                    f"You might want to consider padding your sequence length to {reduce(mul, self.axial_pos_shape)} "
                    "or changing config.axial_pos_shape."
                )
            if self.drop > 0:
                weights = torch.cat(broadcasted_weights, dim=-1)
                # permute weights so that 2D correctly drops dims 1 and 2
                transposed_weights = weights.transpose(2, 1)
                # drop entire matrix of last two dims (prev dims 1 and 2)
                dropped_transposed_weights = F.dropout2d(
                    transposed_weights, p=self.drop, training=self.training
                )
                dropped_weights = dropped_transposed_weights.transpose(2, 1)
                # Flatten the grid back to (batch, seq_len, d_model).
                position_encodings = torch.reshape(
                    dropped_weights, (batch_size, sequence_length, -1)
                )
            else:
                # No dropout: flatten each axis embedding and concatenate on
                # the feature dimension.
                position_encodings = torch.cat(
                    [
                        torch.reshape(weight, (batch_size, sequence_length, -1))
                        for weight in broadcasted_weights
                    ],
                    dim=-1,
                )
        else:
            # Inference may use a shorter sequence than the full grid, but the
            # grid must still be able to hold every requested position.
            if reduce(mul, self.axial_pos_shape) < sequence_length:
                raise ValueError(
                    f"Make sure that config.axial_pos_shape factors: {self.axial_pos_shape} multiply at least to "
                    f"max(sequence_length, least_common_mult_chunk_length): max({sequence_length}, "
                    f"{self.least_common_mult_chunk_length})."
                )
            # compute how many columns are needed
            # NOTE: -(-x // y) is ceiling division of x by y.
            max_position_id = position_ids.max().item()
            required_pos_encodings_columns = -(-(max_position_id + 1) // self.axial_pos_shape[1])
            # cut to columns that are needed
            position_encodings = torch.cat(
                [weight[:, :required_pos_encodings_columns] for weight in broadcasted_weights],
                dim=-1,
            )
            # Flatten the (partial) grid to (batch, positions, d_model).
            position_encodings = torch.reshape(
                position_encodings, (batch_size, -1, position_encodings.shape[-1])
            )
            # select correct position encodings
            # Gather the embedding row for each requested position id,
            # one batch element at a time.
            position_encodings = torch.cat(
                [
                    torch.index_select(position_encodings[i], 0, position_ids[i]).unsqueeze(0)
                    for i in range(batch_size)
                ],
                dim=0,
            )
        return position_encodings
class PositionEmbeddings(qc.Module):
    """Plain learned absolute position embeddings with dropout.

    Used when `config.axial_pos_embds` is False (cf. AxialPositionEmbeddings).
    """

    def __init__(self, config):
        super().__init__()
        self.drop = config.drop
        self.embedding = qc.Embed(config.n_pos, config.d_model)

    def forward(self, position_ids):
        """Look up embeddings for `position_ids` and apply dropout."""
        position_embeddings = self.embedding(position_ids)
        # BUG FIX: torch.nn.functional has no `drop`; `F.drop(...)` raised
        # AttributeError at runtime. The intended call is `F.dropout`.
        position_embeddings = F.dropout(position_embeddings, p=self.drop, training=self.training)
        return position_embeddings
class ReformerEmbeddings(qc.Module):
    """Token embeddings plus position embeddings (axial or sequential).

    The position-embedding flavor is chosen once at construction from
    ``config.axial_pos_embds``.
    """

    def __init__(self, config):
        super().__init__()
        self.n_pos = config.n_pos
        self.drop = config.drop
        self.word_embeddings = qc.Embed(config.s_vocab, config.d_model)
        if config.axial_pos_embds:
            self.position_embeddings = AxialPositionEmbeddings(config)
        else:
            self.position_embeddings = PositionEmbeddings(config)

    def forward(
        self, input_ids=None, position_ids=None, inputs_embeds=None, start_idx_pos_encodings=0
    ):
        # derive (batch, seq) shape and device from whichever input was given
        if input_ids is None:
            shape = inputs_embeds.size()[:-1]
            device = inputs_embeds.device
        else:
            shape = input_ids.size()
            device = input_ids.device
        seq_len = shape[1]
        if position_ids is None:
            # default positions: a contiguous range offset by start_idx_pos_encodings,
            # broadcast across the batch dimension
            position_ids = torch.arange(
                start_idx_pos_encodings,
                start_idx_pos_encodings + seq_len,
                dtype=torch.long,
                device=device,
            )
            position_ids = position_ids.unsqueeze(0).expand(shape)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        if position_ids.shape[-1] > self.n_pos:
            raise ValueError(
                f"Sequence Length: {position_ids.shape[-1]} has to be less or equal than "
                f"config.n_pos {self.n_pos}."
            )
        # dropout on token embeddings, then add positional information
        y = F.drop(inputs_embeds, p=self.drop, training=self.training)
        return y + self.position_embeddings(position_ids)
class EfficientAttentionMixin:
    """Tensor reshaping helpers shared by the chunked self-attention layers."""

    def _look_adjacent(self, vectors, num_chunks_before, num_chunks_after):
        """Concatenate each chunk with its circularly-shifted neighbours along
        the last dim, so a chunk can attend to chunks before/after it."""
        if num_chunks_before == 0 and num_chunks_after == 0:
            return vectors
        rolled = []
        for shift in range(-num_chunks_before, num_chunks_after + 1):
            if shift == 0:
                rolled.append(vectors)
            else:
                # circular shift of the chunk axis (dim 2) by `shift`
                rolled.append(
                    torch.cat([vectors[:, :, shift:, ...], vectors[:, :, :shift, ...]], dim=2)
                )
        return torch.cat(rolled, dim=3)

    def _split_hidden_size_dim(self, x, num_attn_heads, attn_head_size):
        """(batch, seq, hidden) -> (batch, heads, seq, head_size)."""
        y = x.view(*(x.size()[:-1] + (num_attn_heads, attn_head_size)))
        return y.transpose(2, 1)

    def _merge_hidden_size_dims(self, x, num_attn_heads, attn_head_size):
        """(batch, heads, seq, head_size) -> (batch, seq, heads * head_size)."""
        merged = x.permute(0, 2, 1, 3)
        return torch.reshape(merged, (merged.size()[0], -1, num_attn_heads * attn_head_size))

    def _split_seq_length_dim_to(
        self, vectors, dim_factor_1, dim_factor_2, num_attn_heads, attn_head_size=None
    ):
        """Split the sequence axis into (dim_factor_1, dim_factor_2) chunks."""
        batch_size = vectors.shape[0]
        target = (batch_size, num_attn_heads, dim_factor_1, dim_factor_2)
        rank = len(vectors.shape)
        if rank == 4:
            return torch.reshape(vectors, target + (attn_head_size,))
        if rank == 3:
            return torch.reshape(vectors, target)
        raise ValueError(f"Input vector rank should be one of [3, 4], but is: {rank}")
class LSHSelfAttention(qc.Module, EfficientAttentionMixin):
    """Locality-sensitive-hashing self-attention (Reformer).

    Queries and keys share one projection (`query_key`). Vectors are hashed
    into buckets via random rotations, sorted by bucket, attended chunk-wise,
    then un-sorted. Supports incremental decoding through `caches`
    (a pair of previously-computed buckets and hidden states).
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.chunk_length = config.lsh_attn_chunk_length
        self.num_hashes = config.num_hashes
        # int, or list/tuple of factors for a factorized hash
        self.num_buckets = config.num_buckets
        self.num_chunks_before = config.lsh_num_chunks_before
        self.num_chunks_after = config.lsh_num_chunks_after
        self.hash_seed = config.hash_seed
        self.is_decoder = config.is_decoder
        self.n_pos = config.n_pos
        self.drop = config.lsh_attention_probs_dropout_prob
        self.n_heads = config.n_heads
        self.attention_head_size = config.attention_head_size
        self.all_head_size = self.n_heads * self.attention_head_size
        self.d_model = config.d_model
        # projection matrices
        self.query_key = qc.Linear(self.d_model, self.all_head_size, bias=False)
        self.value = qc.Linear(self.d_model, self.all_head_size, bias=False)
        # save mask value here. Need fp32 and fp16 mask values
        # NOTE: self-mask values are milder than the regular mask values so a
        # token only attends to itself when no other target survives masking.
        self.register_buffer("self_mask_value_float16", torch.tensor(-1e3))
        self.register_buffer("self_mask_value_float32", torch.tensor(-1e5))
        self.register_buffer("mask_value_float16", torch.tensor(-1e4))
        self.register_buffer("mask_value_float32", torch.tensor(-1e9))

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        num_hashes=None,
        buckets=None,
        caches=None,
        y_cache=False,
        output_attentions=False,
        **kw,
    ):
        """Run LSH attention over `hiddens` (batch, seq, d_model); returns an
        LSHSelfAttentionOutput with merged head outputs, optional probs and
        the buckets used (for caching / backprop reuse)."""
        sequence_length = hiddens.shape[1]
        batch_size = hiddens.shape[0]
        # num hashes can optionally be overwritten by user
        num_hashes = num_hashes if num_hashes is not None else self.num_hashes
        do_cached_attention = y_cache and caches[1] is not None
        # check if cache shall be used and that hidden states are already cached
        if do_cached_attention:
            # incremental decoding handles exactly one new token at a time
            assert sequence_length == 1
            past_buckets = caches[0]
            past_states = caches[1]
            # get query vector
            query_vectors = self.query_key(hiddens)
            query_vectors = self._split_hidden_size_dim(
                query_vectors, self.n_heads, self.attention_head_size
            )
            if past_buckets is not None:
                (
                    key_value_hidden_states,
                    sorted_bucket_idx,
                    buckets,
                ) = self._get_relevant_hid_states_and_buckets(
                    query_vectors=query_vectors,
                    attention_mask=attention_mask,
                    num_hashes=num_hashes,
                    hiddens=hiddens,
                    past_states=past_states,
                    past_buckets=past_buckets,
                )
                query_key_vectors = self._query_per_attn_head(key_value_hidden_states)
                value_vectors = self._value_per_attn_head(key_value_hidden_states)
                # split key & value vectors by num hashes to apply
                # self attention on each separately
                query_key_vectors = self._split_seq_length_dim_to(
                    query_key_vectors,
                    num_hashes,
                    -1,
                    self.n_heads,
                    self.attention_head_size,
                )
                value_vectors = self._split_seq_length_dim_to(
                    value_vectors,
                    num_hashes,
                    -1,
                    self.n_heads,
                    self.attention_head_size,
                )
                # repeat query vectors across hash dimension
                query_vectors = query_vectors.unsqueeze(2).repeat(1, 1, num_hashes, 1, 1)
            else:
                key_value_hidden_states = torch.cat([past_states, hiddens], dim=1)
                query_key_vectors = self.query_key(key_value_hidden_states)
                value_vectors = self.value(key_value_hidden_states)
        else:
            # project hiddens to query_key and value
            query_vectors = None
            query_key_vectors = self.query_key(hiddens)
            value_vectors = self.value(hiddens)
        # if query key is not already split
        # (short-circuit keeps `past_buckets` from being read when undefined)
        if not do_cached_attention or past_buckets is None:
            query_key_vectors = self._split_hidden_size_dim(
                query_key_vectors, self.n_heads, self.attention_head_size
            )
            value_vectors = self._split_hidden_size_dim(
                value_vectors, self.n_heads, self.attention_head_size
            )
        # cache buckets for next incremental decoding
        if (
            do_cached_attention
            and past_buckets is None
            and key_value_hidden_states.shape[1] >= self.chunk_length
        ):
            buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask)
        # free memory
        del hiddens
        assert query_key_vectors.shape[-1] == self.attention_head_size
        assert value_vectors.shape[-1] == self.attention_head_size
        do_standard_self_attention = (sequence_length <= self.chunk_length) or (
            y_cache and caches[1] is not None
        )
        # LSH attention only makes sense if chunked attention should be performed
        if not do_standard_self_attention:
            # set `num_buckets` on the fly, recommended way to do it
            if self.num_buckets is None:
                self._set_num_buckets(sequence_length)
            # use cached buckets for backprop only
            if buckets is None:
                # hash query key vectors into buckets
                buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask)
            else:
                # make sure buckets has correct shape for LSH attention
                buckets = buckets.view(batch_size, self.n_heads, num_hashes * sequence_length)
            assert int(buckets.shape[-1]) == num_hashes * sequence_length
            (
                sorted_bucket_idx,
                undo_sorted_bucket_idx,
            ) = self._get_sorted_bucket_idx_and_undo_sorted_bucket_idx(
                sequence_length, buckets, num_hashes
            )
            # make sure bucket idx is not longer then sequence length
            sorted_bucket_idx_per_hash = sorted_bucket_idx % sequence_length
            # cluster query key value vectors according to hashed buckets
            query_key_vectors = self._gather_by_expansion(
                query_key_vectors, sorted_bucket_idx_per_hash, num_hashes
            )
            value_vectors = self._gather_by_expansion(
                value_vectors, sorted_bucket_idx_per_hash, num_hashes
            )
            query_key_vectors = self._split_seq_length_dim_to(
                query_key_vectors,
                -1,
                self.chunk_length,
                self.n_heads,
                self.attention_head_size,
            )
            value_vectors = self._split_seq_length_dim_to(
                value_vectors,
                -1,
                self.chunk_length,
                self.n_heads,
                self.attention_head_size,
            )
            if self.chunk_length is None:
                assert self.num_chunks_before == 0 and self.num_chunks_after == 0
        elif do_cached_attention and past_buckets is not None:
            # use max sequence length
            sorted_bucket_idx_per_hash = sorted_bucket_idx
        else:
            # get sequence length indices
            sorted_bucket_idx_per_hash = torch.arange(
                sequence_length, device=query_key_vectors.device
            ).repeat(batch_size, self.n_heads, 1)
        # scale key vectors
        key_vectors = self._len_and_dim_norm(query_key_vectors)
        # set query_vectors to query key vectors if LSH self attention
        query_vectors = query_vectors if query_vectors is not None else query_key_vectors
        # free memory
        del query_key_vectors
        # get attention probs
        out_vectors, logits, attention_probs = self._attend(
            query_vectors=query_vectors,
            key_vectors=key_vectors,
            value_vectors=value_vectors,
            sorted_bucket_idx_per_hash=sorted_bucket_idx_per_hash,
            attention_mask=attention_mask,
            head_mask=head_mask,
            do_standard_self_attention=do_standard_self_attention,
            do_cached_attention=do_cached_attention,
        )
        # free memory
        del key_vectors, value_vectors
        # re-order out_vectors and logits
        if not do_standard_self_attention:
            # sort clusters back to correct ordering
            out_vectors, logits = ReverseSort.apply(
                out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx
            )
        if not do_standard_self_attention or (do_cached_attention and past_buckets is not None):
            # sum up all hash rounds
            if num_hashes > 1:
                out_vectors = self._split_seq_length_dim_to(
                    out_vectors,
                    num_hashes,
                    sequence_length,
                    self.n_heads,
                    self.attention_head_size,
                )
                logits = self._split_seq_length_dim_to(
                    logits,
                    num_hashes,
                    sequence_length,
                    self.n_heads,
                    self.attention_head_size,
                ).unsqueeze(-1)
                # softmax weights across hash rounds (importance weighting)
                probs_vectors = torch.exp(logits - torch.logsumexp(logits, dim=2, keepdim=True))
                out_vectors = torch.sum(out_vectors * probs_vectors, dim=2)
                # free memory
                del probs_vectors
            # free memory
            del logits
        assert out_vectors.shape == (
            batch_size,
            self.n_heads,
            sequence_length,
            self.attention_head_size,
        )
        out_vectors = self._merge_hidden_size_dims(
            out_vectors, self.n_heads, self.attention_head_size
        )
        if output_attentions is False:
            attention_probs = ()
        if buckets is not None:
            buckets = buckets.view(batch_size, self.n_heads, num_hashes, -1)
        return LSHSelfAttentionOutput(
            hiddens=out_vectors, attention_probs=attention_probs, buckets=buckets
        )

    def _query_per_attn_head(self, hiddens):
        """Apply the shared query/key projection per-head via einsum."""
        per_head_query_key = self.query_key.weight.reshape(
            self.n_heads, self.attention_head_size, self.d_model
        ).transpose(-2, -1)
        # only relevant for inference and no bias => we can use einsum here
        query_key_vectors = torch.einsum("balh,ahr->balr", hiddens, per_head_query_key)
        return query_key_vectors

    def _value_per_attn_head(self, hiddens):
        """Apply the value projection per-head via einsum."""
        per_head_value = self.value.weight.reshape(
            self.n_heads, self.attention_head_size, self.d_model
        ).transpose(-2, -1)
        # only relevant for inference and no bias => we can use einsum here
        value_vectors = torch.einsum("balh,ahr->balr", hiddens, per_head_value)
        return value_vectors

    def _hash_vectors(self, vectors, num_hashes, attention_mask, increase_num_buckets=False):
        """Hash `vectors` into buckets with random rotations (angular LSH).

        Returns bucket ids of shape (batch, heads, num_hashes * seq_len), with
        per-hash offsets added so hash rounds never share bucket ids.
        """
        batch_size = vectors.shape[0]
        if isinstance(self.num_buckets, int):
            assert self.num_buckets % 2 == 0
            rotation_size = self.num_buckets
            num_buckets = self.num_buckets
        else:
            # Factorize the hash if self.num_buckets is a list or tuple
            rotation_size, num_buckets = 0, 1
            for bucket_factor in self.num_buckets:
                assert bucket_factor % 2 == 0
                rotation_size = rotation_size + bucket_factor
                num_buckets = num_buckets * bucket_factor
        # remove gradient
        vectors = vectors.detach()
        if self.hash_seed is not None:
            # for determinism
            torch.manual_seed(self.hash_seed)
        rotations_shape = (
            self.n_heads,
            vectors.shape[-1],
            num_hashes,
            rotation_size // 2,
        )
        # create a random self.attention_head_size x num_hashes x num_buckets/2
        random_rotations = torch.randn(rotations_shape, device=vectors.device, dtype=vectors.dtype)
        # Output dim: Batch_Size x Num_Attn_Heads x Num_Hashes x Seq_Len x Num_Buckets/2
        rotated_vectors = torch.einsum("bmtd,mdhr->bmhtr", vectors, random_rotations)
        if isinstance(self.num_buckets, int) or len(self.num_buckets) == 1:
            # bucket = argmax over [Rx; -Rx] (antipodal projections)
            rotated_vectors = torch.cat([rotated_vectors, -rotated_vectors], dim=-1)
            buckets = torch.argmax(rotated_vectors, dim=-1)
        else:
            # Get the buckets for them and combine.
            buckets, cur_sum, cur_product = None, 0, 1
            for bucket_factor in self.num_buckets:
                rotated_vectors_factor = rotated_vectors[
                    ..., cur_sum : cur_sum + (bucket_factor // 2)
                ]
                cur_sum = cur_sum + bucket_factor // 2
                rotated_vectors_factor = torch.cat(
                    [rotated_vectors_factor, -rotated_vectors_factor], dim=-1
                )
                if buckets is None:
                    buckets = torch.argmax(rotated_vectors_factor, dim=-1)
                else:
                    # mixed-radix combination of the factorized buckets
                    buckets = buckets + (cur_product * torch.argmax(rotated_vectors_factor, dim=-1))
                    cur_product = cur_product * bucket_factor
        if attention_mask is not None and (
            attention_mask.sum().item() < batch_size * attention_mask.shape[-1]
        ):
            # add an extra bucket for padding tokens only
            num_buckets = num_buckets + 1
            # assign padding tokens extra bucket
            buckets_mask = attention_mask.to(torch.uint8)[:, None, None, :].expand(buckets.shape)
            buckets = torch.where(
                buckets_mask,
                buckets,
                torch.tensor(num_buckets - 1, dtype=torch.long, device=buckets.device),
            )
        elif increase_num_buckets:
            num_buckets = num_buckets + 1
        # buckets is now (Batch_size x Num_Attn_Heads x Num_Hashes x Seq_Len).
        # Next we add offsets so that bucket numbers from different hashing rounds don't overlap.
        offsets = torch.arange(num_hashes, device=vectors.device)
        offsets = (offsets * num_buckets).view((1, 1, -1, 1))
        # expand to batch size and num attention heads
        offsets = offsets.expand((batch_size, self.n_heads) + offsets.shape[-2:])
        offset_buckets = (buckets + offsets).flatten(start_dim=2, end_dim=3)
        return offset_buckets

    def _get_sorted_bucket_idx_and_undo_sorted_bucket_idx(
        self, sequence_length, buckets, num_hashes
    ):
        """Return the stable argsort of buckets and its inverse permutation."""
        # no gradients are needed
        with torch.no_grad():
            # hash-based sort
            sorted_bucket_idx = _stable_argsort(buckets, dim=-1)
            # create simple indices to scatter to, to have undo sort
            indices = (
                torch.arange(sorted_bucket_idx.shape[-1], device=buckets.device)
                .view(1, 1, -1)
                .expand(sorted_bucket_idx.shape)
            )
            # get undo sort
            undo_sorted_bucket_idx = sorted_bucket_idx.new(*sorted_bucket_idx.size())
            undo_sorted_bucket_idx.scatter_(-1, sorted_bucket_idx, indices)
        return sorted_bucket_idx, undo_sorted_bucket_idx

    def _set_num_buckets(self, sequence_length):
        """Derive `num_buckets` from the sequence length (power of two,
        factorized into a pair when it would grow too large) and persist it
        on both the module and the config."""
        # `num_buckets` should be set to 2 * sequence_length // chunk_length as recommended in paper
        num_buckets_pow_2 = (2 * (sequence_length // self.chunk_length)).bit_length() - 1
        # make sure buckets are power of 2
        num_buckets = 2**num_buckets_pow_2
        # factorize `num_buckets` if `num_buckets` becomes too large
        num_buckets_limit = 2 * max(
            int((self.n_pos // self.chunk_length) ** (0.5)),
            self.chunk_length,
        )
        if num_buckets > num_buckets_limit:
            num_buckets = [
                2 ** (num_buckets_pow_2 // 2),
                2 ** (num_buckets_pow_2 - num_buckets_pow_2 // 2),
            ]
        log.warning(
            f"config.num_buckets is not set. Setting config.num_buckets to {num_buckets}..."
        )
        # set num buckets in config to be properly saved
        self.config.num_buckets = num_buckets
        self.num_buckets = num_buckets

    def _attend(
        self,
        query_vectors,
        key_vectors,
        value_vectors,
        sorted_bucket_idx_per_hash,
        attention_mask,
        head_mask,
        do_standard_self_attention,
        do_cached_attention,
    ):
        """Core attention: QK^T, masking (padding/causal/self), log-softmax
        style normalization, dropout, and weighted sum over values.

        Returns (out_vectors, logits, attention_probs); `logits` are the
        per-query logsumexp values, needed later to combine hash rounds.
        """
        # look at previous and following chunks if chunked attention
        if not do_standard_self_attention:
            key_vectors = self._look_adjacent(
                key_vectors, self.num_chunks_before, self.num_chunks_after
            )
            value_vectors = self._look_adjacent(
                value_vectors, self.num_chunks_before, self.num_chunks_after
            )
        # get logits and dots
        query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2))
        # free memory
        del query_vectors, key_vectors
        # if chunked attention split bucket idxs to query and key
        if not do_standard_self_attention:
            query_bucket_idx = self._split_seq_length_dim_to(
                sorted_bucket_idx_per_hash, -1, self.chunk_length, self.n_heads
            )
            key_value_bucket_idx = self._look_adjacent(
                query_bucket_idx, self.num_chunks_before, self.num_chunks_after
            )
        elif do_cached_attention and query_key_dots.ndim > 4:
            key_value_bucket_idx = sorted_bucket_idx_per_hash
            query_bucket_idx = (
                key_value_bucket_idx.new_ones(key_value_bucket_idx.shape[:-1] + (1,))
                * key_value_bucket_idx.max()
            )
        elif do_cached_attention and query_key_dots.ndim <= 4:
            query_bucket_idx = (query_key_dots.shape[-1] - 1) * torch.ones_like(query_key_dots)[
                :, :, :, -1
            ]
            key_value_bucket_idx = torch.arange(
                query_key_dots.shape[-1], dtype=torch.long, device=query_key_dots.device
            )[None, None, :].expand(query_bucket_idx.shape[:2] + (-1,))
        else:
            query_bucket_idx = key_value_bucket_idx = sorted_bucket_idx_per_hash
        # get correct mask values depending on precision
        if query_key_dots.dtype == torch.float16:
            self_mask_value = self.self_mask_value_float16.half()
            mask_value = self.mask_value_float16.half()
        else:
            self_mask_value = self.self_mask_value_float32
            mask_value = self.mask_value_float32
        if not do_cached_attention:
            mask = self._compute_attn_mask(
                query_bucket_idx,
                key_value_bucket_idx,
                attention_mask,
                query_key_dots.shape,
                do_standard_self_attention,
            )
            if mask is not None:
                query_key_dots = torch.where(mask, query_key_dots, mask_value)
            # free memory
            del mask
        # mask self-attention of a token to itself (milder mask value, so the
        # token can still fall back to itself when everything else is masked)
        self_mask = torch.ne(query_bucket_idx.unsqueeze(-1), key_value_bucket_idx.unsqueeze(-2)).to(
            query_bucket_idx.device
        )
        # apply self_mask
        query_key_dots = torch.where(self_mask, query_key_dots, self_mask_value)
        # free memory
        del self_mask
        logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True)
        # dots shape is `[batch_size, num_attn_heads, num_hashes * seq_len // chunk_length, chunk_length, chunk_length * (1 + num_chunks_before + num_chunks_after)]`
        attention_probs = torch.exp(query_key_dots - logits)
        # free memory
        del query_key_dots
        # drop
        attention_probs = F.drop(attention_probs, p=self.drop, training=self.training)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        # attend values
        out_vectors = torch.matmul(attention_probs, value_vectors)
        # free memory
        del value_vectors
        # merge chunk length
        if out_vectors.ndim > 4:
            logits = logits.flatten(start_dim=2, end_dim=3).squeeze(-1)
            out_vectors = out_vectors.flatten(start_dim=2, end_dim=3)
        return out_vectors, logits, attention_probs

    def _compute_attn_mask(
        self,
        query_indices,
        key_indices,
        attention_mask,
        query_key_dot_shape,
        do_standard_self_attention,
    ):
        """Build the combined (padding x causal) boolean mask in LSH order;
        returns None when neither mask applies."""
        # attention mask for LSH
        if attention_mask is not None:
            # if chunked attention, the attention mask has to correspond to LSH order
            attention_mask = attention_mask.to(torch.uint8)[:, None, :]
            if not do_standard_self_attention:
                # expand attn_mask to fit with key_value_bucket_idx shape
                attention_mask = attention_mask[:, None, :]
                attention_mask = attention_mask.expand(query_indices.shape[:-1] + (-1,))
                # extract attention mask from LSH sorted key_indices
                attention_mask = torch.gather(attention_mask, -1, key_indices)
            attention_mask = attention_mask.unsqueeze(-2).expand(query_key_dot_shape)
        # Causal mask
        if self.is_decoder is True:
            causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(
                query_indices.device
            )
            # add attention mask if not None
            if attention_mask is not None:
                attention_mask = causal_mask * attention_mask
            else:
                attention_mask = causal_mask
        return attention_mask

    def _get_relevant_hid_states_and_buckets(
        self, query_vectors, attention_mask, num_hashes, hiddens, past_states, past_buckets
    ):
        """For cached decoding: hash the new query, then gather only the
        cached hidden states that fall into the chunks relevant to it."""
        # concat hidden states
        hiddens = torch.cat([past_states, hiddens], dim=1)
        # batch_size hidden
        batch_size = hiddens.shape[0]
        sequence_length = hiddens.shape[1]
        # check if cached buckets include pad bucket
        max_bucket = (
            self.num_buckets if isinstance(self.num_buckets, int) else reduce(mul, self.num_buckets)
        )
        # if pad bucket was cached => need to increase num buckets for caching
        increase_num_buckets = past_buckets.max() > num_hashes * max_bucket - 1
        # retrieve query buckets
        query_buckets = self._hash_vectors(
            query_vectors, num_hashes, attention_mask, increase_num_buckets=increase_num_buckets
        )
        # concat buckets
        concat_buckets = torch.cat([past_buckets, query_buckets.unsqueeze(-1)], dim=-1)
        # hash-based sort
        bucket_idx = _stable_argsort(concat_buckets, dim=-1)
        # bucket_idx has shape: BatchSize x NumAttnHeads x NumHashes x SequenceLength
        assert bucket_idx.shape == (
            batch_size,
            self.n_heads,
            num_hashes,
            sequence_length,
        )
        # find indices of new bucket indices
        relevant_bucket_idx = (bucket_idx == (bucket_idx.shape[-1] - 1)).nonzero()
        # expand relevant bucket indices to its chunks
        relevant_bucket_idx_chunk = self._expand_to_indices_in_relevant_chunk(
            relevant_bucket_idx, sequence_length
        )
        relevant_bucket_idx_chunk = bucket_idx[tuple(relevant_bucket_idx_chunk.transpose(0, 1))]
        # adapt bucket_idx for batch and hidden states for index select
        bucket_idx_batch_offset = sequence_length * (
            batch_size
            * torch.arange(
                relevant_bucket_idx_chunk.shape[-1], device=hiddens.device, dtype=torch.long
            )
            // relevant_bucket_idx_chunk.shape[-1]
        )
        # add batch offset
        relevant_bucket_idx_chunk_all_batch = relevant_bucket_idx_chunk + bucket_idx_batch_offset
        hiddens = hiddens.reshape((-1, self.d_model))
        # select all relevant hidden states
        relevant_hidden_states = hiddens.index_select(0, relevant_bucket_idx_chunk_all_batch)
        # reshape hidden states and bucket_idx to correct output
        relevant_hidden_states = relevant_hidden_states.reshape(
            batch_size, self.n_heads, -1, self.d_model
        )
        relevant_bucket_idx_chunk = relevant_bucket_idx_chunk.reshape(
            batch_size, self.n_heads, num_hashes, -1
        )
        assert (
            relevant_hidden_states.shape[2]
            == (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes
        )
        assert (
            relevant_bucket_idx_chunk.shape[-1]
            == (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length
        )
        return relevant_hidden_states, relevant_bucket_idx_chunk, query_buckets

    def _expand_to_indices_in_relevant_chunk(self, indices, sequence_length):
        """Expand each index to the full span of its chunk plus the
        `num_chunks_before`/`num_chunks_after` neighbouring chunks, wrapping
        circularly modulo `sequence_length`."""
        # get relevant indices of where chunk starts and its size
        start_indices_chunk = (
            (indices[:, -1] // self.chunk_length) - self.num_chunks_before
        ) * self.chunk_length
        total_chunk_size = self.chunk_length * (1 + self.num_chunks_before + self.num_chunks_after)
        # expand start indices and add correct chunk offset via arange
        expanded_start_indices = start_indices_chunk.unsqueeze(-1).expand(
            indices.shape[0], total_chunk_size
        )
        chunk_sequence_indices = expanded_start_indices + torch.arange(
            total_chunk_size, device=indices.device, dtype=torch.long
        ).unsqueeze(0).expand(indices.shape[0], total_chunk_size)
        # make sure that circular logic holds via % seq len
        chunk_sequence_indices = chunk_sequence_indices.flatten() % sequence_length
        # expand indices and set indices correctly
        indices = (
            indices.unsqueeze(1)
            .expand((indices.shape[0], total_chunk_size, -1))
            .flatten(0, 1)
            .clone()
        )
        indices[:, -1] = chunk_sequence_indices
        return indices

    def _len_and_dim_norm(self, vectors):
        """Length-normalize, then scale by 1/sqrt(head_size)."""
        vectors = self._len_norm(vectors)
        vectors = vectors * torch.rsqrt(
            torch.tensor(self.attention_head_size, device=vectors.device, dtype=vectors.dtype)
        )
        return vectors

    def _len_norm(self, x, epsilon=1e-6):
        """RMS-style length normalization over the last dimension."""
        variance = torch.mean(x**2, -1, keepdim=True)
        norm_x = x * torch.rsqrt(variance + epsilon)
        return norm_x

    def _gather_by_expansion(self, vectors, idxs, num_hashes):
        """Repeat `vectors` across hash rounds and gather them into sorted
        bucket order along the sequence dimension."""
        expanded_idxs = idxs.unsqueeze(-1).expand(-1, -1, -1, self.attention_head_size)
        vectors = vectors.repeat(1, 1, num_hashes, 1)
        return torch.gather(vectors, 2, expanded_idxs)
class ReverseSort(Function):
    """Autograd op that undoes the LSH bucket sort in the forward pass and
    re-applies it to the incoming gradients in the backward pass."""

    @staticmethod
    def forward(ctx, out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx):
        # stash the sort order so backward can redo it on the gradients
        with torch.no_grad():
            ctx.sorted_bucket_idx = sorted_bucket_idx
            # gather with the inverse permutation restores the original order
            undo_idx = undo_sorted_bucket_idx.unsqueeze(-1).expand(out_vectors.shape)
            out_vectors = torch.gather(out_vectors, 2, undo_idx)
            logits = torch.gather(logits, 2, undo_sorted_bucket_idx)
        return out_vectors, logits

    @staticmethod
    def backward(ctx, grad_out_vectors, grad_logits):
        # re-sort the gradients into bucket order (inverse of forward's gather)
        sort_idx = ctx.sorted_bucket_idx
        grad_out_vectors = torch.gather(
            grad_out_vectors, 2, sort_idx.unsqueeze(-1).expand(grad_out_vectors.shape)
        )
        grad_logits = torch.gather(grad_logits, 2, sort_idx)
        # `None` fillers: the two index arguments receive no gradient
        return grad_out_vectors, grad_logits, None, None
class LocalSelfAttention(qc.Module, EfficientAttentionMixin):
    """Chunked local self-attention (Reformer).

    Standard Q/K/V attention restricted to fixed-length chunks; each chunk can
    additionally look `num_chunks_before`/`num_chunks_after` chunks around it.
    Falls back to full n^2 attention when the sequence fits in one chunk.
    """

    def __init__(self, config):
        super().__init__()
        self.n_heads = config.n_heads
        self.chunk_length = config.local_attn_chunk_length
        self.num_chunks_before = config.local_num_chunks_before
        self.num_chunks_after = config.local_num_chunks_after
        self.is_decoder = config.is_decoder
        self.PAD = config.PAD
        self.attention_head_size = config.attention_head_size
        self.all_head_size = self.n_heads * self.attention_head_size
        self.d_model = config.d_model
        # projection matrices
        self.query = qc.Linear(self.d_model, self.all_head_size, bias=False)
        self.key = qc.Linear(self.d_model, self.all_head_size, bias=False)
        self.value = qc.Linear(self.d_model, self.all_head_size, bias=False)
        self.drop = config.local_attention_probs_dropout_prob
        # save mask value here
        self.register_buffer("mask_value_float16", torch.tensor(-1e4))
        self.register_buffer("mask_value_float32", torch.tensor(-1e9))

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        caches=None,
        y_cache=False,
        output_attentions=False,
        **kw,
    ):
        """Run local attention over `hiddens` (batch, seq, d_model); returns a
        LocalSelfAttentionOutput with merged head outputs and optional probs."""
        sequence_length = hiddens.shape[1]
        batch_size = hiddens.shape[0]
        # check if cache shall be used and that hidden states are already cached
        if y_cache and caches[1] is not None:
            # local attention never caches buckets; only hidden states
            assert caches[0] is None
            key_value_hidden_states = self._retrieve_relevant_hidden_states(
                caches[1], self.chunk_length, self.num_chunks_before
            )
            key_value_hidden_states = torch.cat([key_value_hidden_states, hiddens], dim=1)
            # only query vector for last token
            query_vectors = self.query(hiddens)
            # compute key and value for relevant chunk
            key_vectors = self.key(key_value_hidden_states)
            value_vectors = self.value(key_value_hidden_states)
            # free memory
            del key_value_hidden_states
        else:
            # project hiddens to query, key and value
            query_vectors = self.query(hiddens)
            key_vectors = self.key(hiddens)
            value_vectors = self.value(hiddens)
        # split last dim into `config.n_heads` and `config.attention_head_size`
        query_vectors = self._split_hidden_size_dim(
            query_vectors, self.n_heads, self.attention_head_size
        )
        key_vectors = self._split_hidden_size_dim(
            key_vectors, self.n_heads, self.attention_head_size
        )
        value_vectors = self._split_hidden_size_dim(
            value_vectors, self.n_heads, self.attention_head_size
        )
        assert query_vectors.shape[-1] == self.attention_head_size
        assert key_vectors.shape[-1] == self.attention_head_size
        assert value_vectors.shape[-1] == self.attention_head_size
        if self.chunk_length is None:
            assert self.num_chunks_before == 0 and self.num_chunks_after == 0
        # normalize key vectors
        # (scaled-dot-product: 1/sqrt(head_size) applied to keys)
        key_vectors = key_vectors / torch.sqrt(
            torch.tensor(
                self.attention_head_size, device=key_vectors.device, dtype=key_vectors.dtype
            )
        )
        # get sequence length indices
        indices = torch.arange(sequence_length, device=query_vectors.device).repeat(
            batch_size, self.n_heads, 1
        )
        # if one should do normal n^2 self-attention
        do_standard_self_attention = sequence_length <= self.chunk_length
        # if input should be chunked
        if not do_standard_self_attention:
            # chunk vectors
            # B x Num_Attn_Head x Seq_Len // chunk_len x chunk_len x attn_head_size
            query_vectors = self._split_seq_length_dim_to(
                query_vectors,
                -1,
                self.chunk_length,
                self.n_heads,
                self.attention_head_size,
            )
            key_vectors = self._split_seq_length_dim_to(
                key_vectors,
                -1,
                self.chunk_length,
                self.n_heads,
                self.attention_head_size,
            )
            value_vectors = self._split_seq_length_dim_to(
                value_vectors,
                -1,
                self.chunk_length,
                self.n_heads,
                self.attention_head_size,
            )
            # chunk indices
            query_indices = self._split_seq_length_dim_to(
                indices, -1, self.chunk_length, self.n_heads
            )
            key_indices = self._split_seq_length_dim_to(
                indices, -1, self.chunk_length, self.n_heads
            )
            # append chunks before and after
            key_vectors = self._look_adjacent(
                key_vectors, self.num_chunks_before, self.num_chunks_after
            )
            value_vectors = self._look_adjacent(
                value_vectors, self.num_chunks_before, self.num_chunks_after
            )
            key_indices = self._look_adjacent(
                key_indices, self.num_chunks_before, self.num_chunks_after
            )
        else:
            query_indices = key_indices = indices
        # query-key matmul: QK^T
        query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2))
        # free memory
        del query_vectors, key_vectors
        mask = self._compute_attn_mask(
            query_indices,
            key_indices,
            attention_mask,
            query_key_dots.shape,
            do_standard_self_attention,
        )
        if mask is not None:
            # get mask tensor depending on half precision or not
            if query_key_dots.dtype == torch.float16:
                mask_value = self.mask_value_float16.half()
            else:
                mask_value = self.mask_value_float32
            query_key_dots = torch.where(mask, query_key_dots, mask_value)
        # free memory
        del mask
        # softmax
        # (computed as exp(x - logsumexp(x)) for numerical stability)
        logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True)
        attention_probs = torch.exp(query_key_dots - logits)
        # free memory
        del logits
        # drop
        attention_probs = F.drop(attention_probs, p=self.drop, training=self.training)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        # attend values
        out_vectors = torch.matmul(attention_probs, value_vectors)
        # free memory
        del value_vectors
        # merge chunk length
        if not do_standard_self_attention:
            out_vectors = out_vectors.flatten(start_dim=2, end_dim=3)
        assert out_vectors.shape == (
            batch_size,
            self.n_heads,
            sequence_length,
            self.attention_head_size,
        )
        out_vectors = self._merge_hidden_size_dims(
            out_vectors, self.n_heads, self.attention_head_size
        )
        if output_attentions is False:
            attention_probs = ()
        return LocalSelfAttentionOutput(hiddens=out_vectors, attention_probs=attention_probs)

    def _compute_attn_mask(
        self,
        query_indices,
        key_indices,
        attention_mask,
        query_key_dots_shape,
        do_standard_self_attention,
    ):
        """Build the combined (padding x causal) boolean mask in chunked
        layout; returns None when neither mask applies."""
        # chunk attention mask and look before and after
        if attention_mask is not None:
            attention_mask = attention_mask.to(torch.uint8)[:, None, :]
            if not do_standard_self_attention:
                attention_mask = self._split_seq_length_dim_to(
                    attention_mask, -1, self.chunk_length, 1
                )
                attention_mask = self._look_adjacent(
                    attention_mask, self.num_chunks_before, self.num_chunks_after
                )
            # create attn_mask
            attention_mask = attention_mask.unsqueeze(-2).expand(query_key_dots_shape)
        # Causal mask
        if self.is_decoder is True:
            causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(
                query_indices.device
            )
            # add attention mask if not None
            if attention_mask is not None:
                attention_mask = causal_mask * attention_mask
            else:
                attention_mask = causal_mask
        return attention_mask

    @staticmethod
    def _retrieve_relevant_hidden_states(previous_hidden_states, chunk_length, num_chunks_before):
        """Keep only the cached states from the last `num_chunks_before`
        complete chunks onward — all the current token can attend to."""
        start_position = (
            (previous_hidden_states.shape[1] // chunk_length) - num_chunks_before
        ) * chunk_length
        return previous_hidden_states[:, start_position:]
class ReformerSelfOutput(qc.Module):
    """Projects the concatenated attention heads back to the model dimension."""

    def __init__(self, config):
        super().__init__()
        all_head_size = config.n_heads * config.attention_head_size
        # dropout probability applied after the output projection
        self.drop = config.drop
        self.dense = qc.Linear(all_head_size, config.d_model, bias=False)

    def forward(self, hiddens):
        # linear projection followed by dropout
        y = self.dense(hiddens)
        return F.drop(y, p=self.drop, training=self.training)
class Attention(qc.Module):
    """One Reformer attention sublayer: pre-layer-norm, LSH or local
    self-attention (picked per layer from `config.attn_layers`), then an
    output projection."""

    def __init__(self, config, layer_id=0):
        super().__init__()
        self.layer_id = layer_id
        self.attn_layers = config.attn_layers
        self.layer_norm = qc.LayerNorm(config.d_model, eps=config.eps)
        # select this layer's attention flavor from config.attn_layers
        if len(set(self.attn_layers)) == 1 and self.attn_layers[0] == "lsh":
            self.self_attention = LSHSelfAttention(config)
        elif len(set(self.attn_layers)) == 1 and self.attn_layers[0] == "local":
            self.self_attention = LocalSelfAttention(config)
        elif len(set(self.attn_layers)) == 2 and set(self.attn_layers) == set(["lsh", "local"]):
            # get correct attn layers
            if self.attn_layers[self.layer_id] == "lsh":
                self.self_attention = LSHSelfAttention(config)
            else:
                self.self_attention = LocalSelfAttention(config)
        else:
            raise NotImplementedError(
                f"Only attn layer types 'lsh' and 'local' exist, but got `config.attn_layers`: {self.attn_layers}. "
                "Select attn layer types from ['lsh', 'local'] only."
            )
        self.output = ReformerSelfOutput(config)

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        num_hashes=None,
        caches=None,
        y_cache=False,
        orig_sequence_length=None,
        output_attentions=False,
        buckets=None,
    ):
        """Run self-attention; when `y_cache` is set, this layer's entry in
        `caches` is updated in place with (buckets, hidden states)."""
        hiddens = self.layer_norm(hiddens)
        # make sure cached hidden states is set to None for backward pass
        if caches is not None:
            caches_layer = caches[self.layer_id]
        else:
            caches_layer = None
        # use cached buckets for backprob if buckets not None for LSHSelfAttention
        self_attention_outputs = self.self_attention(
            hiddens=hiddens,
            head_mask=head_mask,
            attention_mask=attention_mask,
            num_hashes=num_hashes,
            caches=caches_layer,
            y_cache=y_cache,
            output_attentions=output_attentions,
            buckets=buckets,
        )
        # add buckets if necessary (only the LSH attention output carries them)
        if hasattr(self_attention_outputs, "buckets"):
            buckets = self_attention_outputs.buckets
        else:
            buckets = None
        # cache hidden states for future use
        if y_cache:
            if caches[self.layer_id][0] is None:
                # padded input should not be cached
                past_buckets = (
                    buckets[:, :, :, :orig_sequence_length]
                    if (buckets is not None and orig_sequence_length > 1)
                    else buckets
                )
            else:
                past_buckets = torch.cat([caches[self.layer_id][0], buckets], dim=-1)
            if caches[self.layer_id][1] is None:
                # padded input should not be cached
                past_states = hiddens[:, :orig_sequence_length]
            else:
                past_states = torch.cat([caches[self.layer_id][1], hiddens], dim=1)
            caches[self.layer_id] = (past_buckets, past_states)
        # compute attention feed forward output
        attention_output = self.output(self_attention_outputs.hiddens)
        return AttentionOutput(
            hiddens=attention_output,
            attention_probs=self_attention_outputs.attention_probs,
            buckets=buckets,
        )
class ReformerFeedForwardDense(qc.Module):
    """Expanding half of the feed-forward block: project to
    `feed_forward_size`, apply dropout, then the configured activation."""

    def __init__(self, cfg):
        super().__init__()
        self.drop = cfg.drop
        self.act = qu.activation(cfg.act)
        self.dense = qc.Linear(cfg.d_model, cfg.feed_forward_size)

    def forward(self, x):
        h = self.dense(x)
        h = F.drop(h, p=self.drop, training=self.training)
        return self.act(h)
class ReformerFeedForwardOutput(qc.Module):
    """Contracting half of the feed-forward block: project back from
    `feed_forward_size` to `d_model`, with dropout."""

    def __init__(self, config):
        super().__init__()
        self.drop = config.drop
        self.dense = qc.Linear(config.feed_forward_size, config.d_model)

    def forward(self, x):
        projected = self.dense(x)
        return F.drop(projected, p=self.drop, training=self.training)
class ChunkReformerFeedForward(qc.Module):
    """Pre-norm feed-forward block applied in chunks along the sequence
    dimension to bound peak memory."""

    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        # chunking happens along the sequence axis
        self.seq_len_dim = 1
        self.layer_norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.dense = ReformerFeedForwardDense(config)
        self.output = ReformerFeedForwardOutput(config)

    def forward(self, attention_output):
        return apply_chunking_to_forward(
            self.forward_chunk,
            self.chunk_size_feed_forward,
            self.seq_len_dim,
            attention_output,
        )

    def forward_chunk(self, hiddens):
        # norm -> expand -> contract for a single chunk
        normed = self.layer_norm(hiddens)
        return self.output(self.dense(normed))
class Layer(qc.Module):
    """Reversible (RevNet-style) Reformer layer.

    Forward computes Y_1 = X_1 + f(X_2) and Y_2 = X_2 + g(Y_1), where f is
    attention and g is the chunked feed-forward. `backward_pass` reconstructs
    X_1, X_2 from Y_1, Y_2 instead of keeping activations, replaying dropout
    exactly via the RNG seeds recorded during forward.
    """

    def __init__(self, config, layer_id=0):
        super().__init__()
        self.attention = Attention(config, layer_id)
        # drop requires to have the same
        # seed for forward and backward pass
        self.attention_seed = None
        self.feed_forward_seed = None
        self.feed_forward = ChunkReformerFeedForward(config)

    def _init_attention_seed(self):
        # Draw and record a fresh seed for attention dropout, then seed the
        # RNG with it so backward_pass can replay the identical dropout mask.
        if hasattr(torch.cuda, "default_generators") and len(torch.cuda.default_generators) > 0:
            # GPU
            device_idx = torch.cuda.current_device()
            self.attention_seed = torch.cuda.default_generators[device_idx].seed()
        else:
            # CPU
            self.attention_seed = int(torch.seed() % sys.maxsize)
        torch.manual_seed(self.attention_seed)

    def _init_feed_forward_seed(self):
        # Same seed bookkeeping, but for the feed-forward dropout.
        if hasattr(torch.cuda, "default_generators") and len(torch.cuda.default_generators) > 0:
            # GPU
            device_idx = torch.cuda.current_device()
            self.feed_forward_seed = torch.cuda.default_generators[device_idx].seed()
        else:
            # CPU
            self.feed_forward_seed = int(torch.seed() % sys.maxsize)
        torch.manual_seed(self.feed_forward_seed)

    def forward(
        self,
        prev_attn_output,
        hiddens,
        attention_mask=None,
        head_mask=None,
        num_hashes=None,
        caches=None,
        y_cache=False,
        orig_sequence_length=None,
        output_attentions=False,
    ):
        # no_grad: activations are reconstructed in backward_pass rather than
        # stored by autograd
        with torch.no_grad():
            if self.training:
                self._init_attention_seed()
            attn_outputs = self.attention(
                hiddens=hiddens,
                head_mask=head_mask,
                attention_mask=attention_mask,
                num_hashes=num_hashes,
                caches=caches,
                y_cache=y_cache,
                orig_sequence_length=orig_sequence_length,
                output_attentions=output_attentions,
            )
            attn_output = attn_outputs.hiddens
            # Implementation of RevNet (see Fig. 6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0)
            # Y_1 = X_1 + f(X_2)
            attn_output = prev_attn_output + attn_output
            # free memory
            del prev_attn_output
            # every forward pass we sample a different seed
            # for drop and save seed for forward fn in backward
            # to have correct drop
            if self.training:
                self._init_feed_forward_seed()
            # Y_2 = X_2 + g(Y_1)
            hiddens = hiddens + self.feed_forward(attn_output)
        return ReformerOutput(
            attn_output=attn_output,
            hiddens=hiddens,
            attention_probs=attn_outputs.attention_probs,
            buckets=attn_outputs.buckets,
        )

    def backward_pass(
        self,
        next_attn_output,
        hiddens,
        grad_attn_output,
        grad_model_states,
        attention_mask=None,
        head_mask=None,
        buckets=None,
    ):
        """Invert forward: reconstruct X_1, X_2 from Y_1, Y_2 and accumulate
        their gradients, recomputing f and g under the recorded seeds."""
        # training-only: relies on seeds recorded in forward
        assert self.training
        with torch.enable_grad():
            next_attn_output.requires_grad = True
            # set seed to have correct drop
            torch.manual_seed(self.feed_forward_seed)
            # g(Y_1)
            res_hidden_states = self.feed_forward(next_attn_output)
            res_hidden_states.backward(grad_model_states, retain_graph=True)
        with torch.no_grad():
            # X_2 = Y_2 - g(Y_1)
            hiddens = hiddens - res_hidden_states
            del res_hidden_states
            grad_attn_output = grad_attn_output + next_attn_output.grad
            next_attn_output.grad = None
        with torch.enable_grad():
            hiddens.requires_grad = True
            # set seed to have correct drop
            torch.manual_seed(self.attention_seed)
            # f(X_2)
            # use cached buckets for backprob if buckets not None for LSHSelfAttention
            output = self.attention(
                hiddens=hiddens,
                head_mask=head_mask,
                attention_mask=attention_mask,
                buckets=buckets,
            ).hiddens
            output.backward(grad_attn_output, retain_graph=True)
        with torch.no_grad():
            # X_1 = Y_1 - f(X_2)
            attn_output = next_attn_output - output
            del output, next_attn_output
            grad_model_states = grad_model_states + hiddens.grad
            hiddens.grad = None
            hiddens = hiddens.detach()
        return ReformerBackwardOutput(
            attn_output=attn_output,
            hiddens=hiddens,
            grad_attn_output=grad_attn_output,
            grad_model_states=grad_model_states,
        )
class _ReversibleFunction(Function):
    """Custom autograd Function driving the whole reversible layer stack.

    Forward saves only the final (attn_output, hiddens) pair; backward walks
    the layers in reverse, letting each `Layer.backward_pass` reconstruct its
    inputs and propagate gradients, so per-layer activations are never stored.
    """

    @staticmethod
    def forward(
        ctx,
        hiddens,
        layers,
        attention_mask,
        head_mask,
        num_hashes,
        all_hidden_states,
        all_attentions,
        caches,
        y_cache,
        orig_sequence_length,
        output_hidden_states,
        output_attentions,
    ):
        all_buckets = ()
        # split duplicated tensor into the two RevNet streams
        hiddens, attn_output = torch.chunk(hiddens, 2, dim=-1)
        for layer_id, (layer, layer_head_mask) in enumerate(zip(layers, head_mask)):
            if output_hidden_states is True:
                all_hidden_states.append(hiddens)
            layer_outputs = layer(
                prev_attn_output=attn_output,
                hiddens=hiddens,
                attention_mask=attention_mask,
                head_mask=layer_head_mask,
                num_hashes=num_hashes,
                caches=caches,
                y_cache=y_cache,
                orig_sequence_length=orig_sequence_length,
                output_attentions=output_attentions,
            )
            attn_output = layer_outputs.attn_output
            hiddens = layer_outputs.hiddens
            # keep buckets so backward can replay LSH attention exactly
            all_buckets = all_buckets + (layer_outputs.buckets,)
            if output_attentions:
                all_attentions.append(layer_outputs.attention_probs)
        # Add last layer
        if output_hidden_states is True:
            all_hidden_states.append(hiddens)
        # attach params to ctx for backward
        ctx.save_for_backward(attn_output.detach(), hiddens.detach())
        ctx.layers = layers
        ctx.all_buckets = all_buckets
        ctx.head_mask = head_mask
        ctx.attention_mask = attention_mask
        # Concatenate 2 RevNet outputs
        return torch.cat([attn_output, hiddens], dim=-1)

    @staticmethod
    def backward(ctx, grad_model_states):
        # split the incoming gradient into the two RevNet streams
        grad_attn_output, grad_model_states = torch.chunk(grad_model_states, 2, dim=-1)
        # retrieve params from ctx for backward
        attn_output, hiddens = ctx.saved_tensors
        # create tuple
        output = ReformerBackwardOutput(
            attn_output=attn_output,
            hiddens=hiddens,
            grad_attn_output=grad_attn_output,
            grad_model_states=grad_model_states,
        )
        # free memory
        del grad_attn_output, grad_model_states, attn_output, hiddens
        layers = ctx.layers
        all_buckets = ctx.all_buckets
        head_mask = ctx.head_mask
        attention_mask = ctx.attention_mask
        # walk the stack in reverse, reconstructing inputs layer by layer
        for idx, layer in enumerate(layers[::-1]):
            # pop last buckets from stack
            buckets = all_buckets[-1]
            all_buckets = all_buckets[:-1]
            # backprop
            output = layer.backward_pass(
                next_attn_output=output.attn_output,
                hiddens=output.hiddens,
                grad_attn_output=output.grad_attn_output,
                grad_model_states=output.grad_model_states,
                head_mask=head_mask[len(layers) - idx - 1],
                attention_mask=attention_mask,
                buckets=buckets,
            )
        assert all_buckets == (), "buckets have to be empty after backpropagation"
        grad_model_states = torch.cat([output.grad_attn_output, output.grad_model_states], dim=-1)
        # num of return vars has to match num of forward() args
        # return gradient for hiddens arg and None for other args
        return grad_model_states, None, None, None, None, None, None, None, None, None, None, None
class Encoder(qc.Module):
    """Stack of reversible Reformer layers executed through
    `_ReversibleFunction`, with a final layer norm and dropout over the
    concatenated pair of RevNet streams (width 2 * d_model)."""

    def __init__(self, config):
        super().__init__()
        self.drop = config.drop
        self.layers = nn.ModuleList([Layer(config, i) for i in range(config.n_lays)])
        # Reformer uses RevNets: the two output streams are concatenated, so
        # normalization runs over 2 * d_model features
        self.layer_norm = qc.LayerNorm(2 * config.d_model, eps=config.eps)

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        num_hashes=None,
        caches=None,
        y_cache=False,
        orig_sequence_length=None,
        output_hidden_states=False,
        output_attentions=False,
    ):
        # lists are filled in place by _ReversibleFunction when requested
        all_hidden_states = []
        all_attentions = []
        # fresh per-layer cache slots when none were supplied
        if caches is None:
            caches = [(None, None) for _ in range(len(self.layers))]
        # duplicate the input so both RevNet streams start from the same tensor
        stacked = torch.cat([hiddens, hiddens], dim=-1)
        stacked = _ReversibleFunction.apply(
            stacked,
            self.layers,
            attention_mask,
            head_mask,
            num_hashes,
            all_hidden_states,
            all_attentions,
            caches,
            y_cache,
            orig_sequence_length,
            output_hidden_states,
            output_attentions,
        )
        # normalize and drop over the concatenated streams
        out = self.layer_norm(stacked)
        out = F.drop(out, p=self.drop, training=self.training)
        return ReformerEncoderOutput(
            hiddens=out,
            all_hidden_states=all_hidden_states,
            all_attentions=all_attentions,
            caches=caches,
        )
class ReformerOnlyLMHead(qc.Module):
    """Language-modeling head decoding the 2 * d_model reversible encoder
    output into vocabulary logits, applied in chunks along the sequence."""

    def __init__(self, config):
        super().__init__()
        self.seq_len_dim = 1
        self.chunk_size_lm_head = config.chunk_size_lm_head
        self.decoder = qc.Linear(2 * config.d_model, config.s_vocab, bias=False)
        # bias kept as a separate parameter so it survives embedding resizes
        self.bias = nn.Parameter(torch.zeros(config.s_vocab))
        self.decoder.bias = self.bias

    def forward(self, hiddens):
        return apply_chunking_to_forward(
            self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hiddens
        )

    def forward_chunk(self, hiddens):
        return self.decoder(hiddens)

    def _tie_weights(self):
        # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
        self.bias = self.decoder.bias
class Model(PreTrained):
    """Reformer backbone: embeddings followed by the reversible encoder stack.

    At inference time, inputs whose length is not a multiple of the least
    common multiple of all chunk lengths are right-padded automatically and
    the padding is stripped from the output; during training, a mismatched
    length raises instead.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        assert self.config.n_lays > 0
        self.embeddings = ReformerEmbeddings(config)
        self.encoder = Encoder(config)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        num_hashes=None,
        caches=None,
        y_cache=None,
        output_hidden_states=None,
        output_attentions=None,
        return_dict=None,
    ):
        # fall back to config defaults for unset flags
        y_cache = y_cache if y_cache is not None else self.config.y_cache
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            device = input_ids.device
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            device = inputs_embeds.device
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        assert len(input_shape) == 2
        if caches is not None:
            # incremental decoding with caches is inference-only
            assert not self.training
        # prepare head mask
        head_mask = self.get_head_mask(head_mask, self.config.n_lays, is_attention_chunked=True)
        # original sequence length, used later to strip padding from the output
        orig_sequence_length = input_shape[-1]
        # pad when the sequence is not a multiple of the LCM of all chunk lengths
        least_common_mult_chunk_length = _get_least_common_mult_chunk_len(self.config)
        min_chunk_length = _get_min_chunk_len(self.config)
        must_pad_to_match_chunk_length = (
            input_shape[-1] % least_common_mult_chunk_length != 0
            and input_shape[-1] > min_chunk_length
            and caches is None
        )
        if must_pad_to_match_chunk_length:
            padding_length = (
                least_common_mult_chunk_length - input_shape[-1] % least_common_mult_chunk_length
            )
            if self.training is True:
                raise ValueError(
                    f"If training, sequence length {input_shape[-1]} has to be a multiple of least common multiple "
                    f"chunk_length {least_common_mult_chunk_length}. Please consider padding the input to a length "
                    f"of {input_shape[-1] + padding_length}."
                )
            # pad input
            (
                input_ids,
                inputs_embeds,
                attention_mask,
                position_ids,
                input_shape,
            ) = self._pad_to_mult_of_chunk_length(
                input_ids,
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                position_ids=position_ids,
                input_shape=input_shape,
                padding_length=padding_length,
                padded_seq_length=least_common_mult_chunk_length,
                device=device,
            )
        # start index for position encoding depends on incremental decoding
        if caches is not None:
            start_idx_pos_encodings = caches[0][1].shape[1]
        else:
            start_idx_pos_encodings = 0
        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            start_idx_pos_encodings=start_idx_pos_encodings,
        )
        encoder_outputs = self.encoder(
            hiddens=embedding_output,
            head_mask=head_mask,
            attention_mask=attention_mask,
            num_hashes=num_hashes,
            caches=caches,
            y_cache=y_cache,
            orig_sequence_length=orig_sequence_length,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
        )
        sequence_output = encoder_outputs.hiddens
        # strip padding if it was applied
        if must_pad_to_match_chunk_length:
            sequence_output = sequence_output[:, :orig_sequence_length]
        caches = encoder_outputs.caches if y_cache else None
        hiddens = encoder_outputs.all_hidden_states if output_hidden_states else None
        attns = encoder_outputs.all_attentions if output_attentions else None
        if not return_dict:
            return tuple(v for v in [sequence_output, caches, hiddens, attns] if v is not None)
        return qo.WithCaches(
            y=sequence_output,
            caches=caches,
            hiddens=hiddens,
            attns=attns,
        )

    def _pad_to_mult_of_chunk_length(
        self,
        input_ids,
        inputs_embeds=None,
        attention_mask=None,
        position_ids=None,
        input_shape=None,
        padding_length=None,
        padded_seq_length=None,
        device=None,
    ):
        """Right-pad ids/embeds/mask/positions by `padding_length` tokens.

        Returns the padded tensors plus the updated `input_shape`.
        """
        log.info(
            f"Input ids are automatically padded from {input_shape[-1]} to {input_shape[-1] + padding_length} to be a "
            f"multiple of `config.chunk_length`: {padded_seq_length}"
        )
        padded_input_ids = torch.full(
            (input_shape[0], padding_length),
            self.config.PAD,
            device=device,
            dtype=torch.long,
        )
        # Extend `attention_mask`; padded positions are masked out (0)
        if attention_mask is not None:
            pad_attention_mask = torch.zeros(
                input_shape[0], padding_length, device=device, dtype=attention_mask.dtype
            )
            attention_mask = torch.cat([attention_mask, pad_attention_mask], dim=-1)
        else:
            attention_mask = torch.cat(
                [
                    torch.ones(input_shape, device=device, dtype=torch.uint8),
                    torch.zeros((input_shape[0], padding_length), device=device, dtype=torch.uint8),
                ],
                dim=-1,
            )
        # Pad position ids if given. Done BEFORE `input_shape` is updated below
        # so the new positions continue from the original sequence length.
        if position_ids is not None:
            # assumes position_ids is (batch, seq) — TODO confirm against embeddings
            pad_start = position_ids.shape[-1]
            padded_position_ids = torch.arange(
                pad_start, pad_start + padding_length, dtype=torch.long, device=device
            )
            # bug fix: expand the freshly created padding positions. The
            # original expanded `position_ids` to (batch, padding_length) —
            # discarding the arange result and producing a shape mismatch —
            # and computed the arange start from the already-padded shape.
            padded_position_ids = padded_position_ids.unsqueeze(0).expand(
                input_shape[0], padding_length
            )
            position_ids = torch.cat([position_ids, padded_position_ids], dim=-1)
        # Extend `input_ids` with padding to match least common multiple chunk_length
        if input_ids is not None:
            input_ids = torch.cat([input_ids, padded_input_ids], dim=-1)
            input_shape = input_ids.size()
        # Extend `inputs_embeds` with embeddings of the pad tokens
        if inputs_embeds is not None:
            padded_inputs_embeds = self.embeddings(padded_input_ids, position_ids)
            inputs_embeds = torch.cat([inputs_embeds, padded_inputs_embeds], dim=-2)
            input_shape = inputs_embeds.size()
        return input_ids, inputs_embeds, attention_mask, position_ids, input_shape
class ReformerModelWithLMHead(PreTrained):
    """Reformer decoder with a causal language-modeling head.

    When `labels` are given, returns a next-token cross-entropy loss computed
    over the shifted logits/labels.
    """

    def __init__(self, config):
        super().__init__(config)
        # causal LM requires decoder mode and no look-ahead chunks
        assert config.is_decoder
        assert "local" not in self.config.attn_layers or config.local_num_chunks_after == 0
        assert "lsh" not in self.config.attn_layers or config.lsh_num_chunks_after == 0
        self.reformer = Model(config)
        self.lm_head = ReformerOnlyLMHead(config)

    def forward(
        self,
        input_ids=None,
        position_ids=None,
        attention_mask=None,
        head_mask=None,
        inputs_embeds=None,
        num_hashes=None,
        caches=None,
        y_cache=None,
        output_hidden_states=None,
        output_attentions=None,
        return_dict=None,
        labels=None,
    ):
        if return_dict is None:
            return_dict = self.config.use_return_dict
        outs = self.reformer(
            input_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            num_hashes=num_hashes,
            caches=caches,
            y_cache=y_cache,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )
        logits = self.lm_head(outs[0])
        loss = None
        if labels is not None:
            # next-token prediction: tokens < n predict token n
            shifted_logits = logits[..., :-1, :].contiguous()
            shifted_labels = labels[..., 1:].contiguous()
            loss = CrossEntropyLoss()(
                shifted_logits.view(-1, self.config.s_vocab), shifted_labels.view(-1)
            )
        if not return_dict:
            tail = (logits,) + outs[1:]
            return tail if loss is None else (loss,) + tail
        return qo.LossCaches(
            loss=loss,
            logits=logits,
            caches=outs.caches,
            hiddens=outs.hiddens,
            attns=outs.attns,
        )
class ForMasked(PreTrained):
    """Reformer with an LM head for masked-token prediction."""

    def __init__(self, **kw):
        super().__init__(**kw)
        config = self.get_cfg(kw)
        # masked LM needs a bidirectional (non-causal) model
        assert not config.is_decoder
        self.model = Model(**kw)
        self.proj = ReformerOnlyLMHead(**kw)

    forward = qf.forward_masked
class ForSeqClass(PreTrained):
    """Reformer with a tanh classifier head for sequence classification."""

    def __init__(self, **kw):
        super().__init__(**kw)
        config = self.get_cfg(kw)
        self.model = Model(**kw)
        # the reversible encoder emits 2 * d_model features
        self.proj = Classifier(config.d_model, "tanh", **kw, d_model=2 * config.d_model)

    forward = qf.forward_seq
class ForQA(PreTrained):
    """Reformer with a linear span-prediction head for extractive QA."""

    def __init__(self, **kw):
        super().__init__(**kw)
        config = self.get_cfg(kw)
        self.model = Model(add_pool=False, **kw)
        self.proj = qc.Linear(config.d_model, config.n_labels, **kw)

    forward = qf.forward_qa
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,523
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/models/old/params.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import tensorflow as T
from absl import flags as F
from qnarre.core import utils as U
def load_flags():
    """Register the absl command-line flags used by the train/eval runners.

    All flags default to None; registration order matches the historical
    one-DEFINE-per-line version.
    """
    for name in ("do_eval", "do_train"):
        F.DEFINE_bool(name, None, "")
    for name in ("stop_threshold", "train_epochs"):
        F.DEFINE_float(name, None, "")
    for name in (
        "batch_size",
        "checkpoint_steps",
        "epochs_between_evals",
        "eval_batch_size",
        "eval_steps",
        "iters_per_loop",
        "train_steps",
        "warmup_steps",
    ):
        F.DEFINE_integer(name, None, "")
    for name in ("dir_data", "dir_model", "model", "dir_save"):
        F.DEFINE_string(name, None, "")
    F.DEFINE_enum("data_format", None, ["channels_first", "channels_last"], "")
def load_params():
    """Build the runtime Params, defaulting data_format from CUDA support.

    channels_first is preferred when TF was built with CUDA (GPU layouts);
    otherwise channels_last. An explicit --data_format flag wins.
    """
    default = "channels_first" if T.test.is_built_with_cuda() else "channels_last"
    return U.Params(_params, data_format=F.FLAGS.data_format or default)
_params = dict(
layout=None,
features=None,
)
# Extended hyper-parameter preset (optimizer, LR schedule, attention, data
# pipeline, sampling).  NOTE(review): not referenced by load_params above —
# presumably a reference/legacy preset; confirm before deleting.
_params2 = dict(
    epochs_between_evals=None,
    # len_bucket_step=1.1,
    # vocab_divisor=1,
    # --- Adam / AdamW optimizer settings ---
    adam_beta1=0.9,
    adam_beta2=0.997,  # 0.999
    adam_eps=1e-9,  # 1e-6
    adamw_decay=0.0,
    add_relative=False,
    all_reduce_alg=None,
    alpha=0.6,
    # --- attention / model architecture ---
    attn_bdims="",
    attn_type="dot_attn",
    beam_size=4,
    causal_self_attn=True,
    clip_grad_norm=2.0,  # 0.0 no gradient clipping,
    compress_steps=0,
    conv_first_kernel=3,
    daisy_chain_vars=True,
    data_format=None,
    dataset=None,
    dist_strategy=None,
    drop_long_seqs=False,
    ds_src_len=0,
    ds_tgt_len=0,
    dtype=None,
    # --- evaluation cadence ---
    eval_frequency=100,
    eval_steps=1,
    extra_decode_len=50,
    factored_logits=False,
    ffn_bdims="",
    ffn_layer="dense_dense",
    fixed_batch_size=False,
    full_predict=False,
    gpu_thread_mode=None,
    grad_noise_scale=0.0,
    group_eps=1e-5,
    heads_share_embed=False,
    init_gain=1.5,  # 1.0
    initializer="uniform_unit_scaling",  # 'orthogonal',
    input_frames=1,
    inter_op=None,
    intra_op=None,
    kernel_height=3,
    kernel_width=1,
    label_smoothing=0.1,
    # --- learning-rate schedule ---
    learn_rate=2e-4,  # 2.0, squad 5e-6,
    loss_scale=None,
    lr_constant=0.1,
    lr_schedule="constant*linear_warmup*rsqrt_decay",
    lr_warmup_steps=200,
    max_position=0,
    max_train_steps=None,
    min_len_bucket=8,
    min_len=0,
    # --- mixed precision / masked-LM ---
    mixed_precision_loss=32768,
    mixed_precision_loss_scaler="exponential",
    mlm_preds=20,
    mlm_prob=0.15,
    model=None,
    multiply_mode="sqrt_depth",
    no_data_parallel=False,
    norm_eps=1e-6,
    norm_type="layer",  # 'batch', layer', 'noam', 'none'.
    num_gpu=None,
    n_groups=8,
    num_parallel_calls=None,
    num_sampled_classes=0,
    opt_multistep_accumulate_steps=None,
    opt_zero_grads=False,
    overload_metric="",
    # --- dataset packing / padding ---
    pack_dataset=False,
    pad_batch=False,
    pad_remover=False,  # True,
    parallel_batches=None,
    params_profile=None,
    penalty=0.1,
    post_cmd="dan",
    pre_cmd="n",
    prepend_mode="none",
    prepost_bdims="",
    private_threads=None,
    prox_bias=False,
    run_autoregressive=False,
    # --- scheduled sampling ---
    sampl_gold_mixin_prob=0.5,
    sampl_method="argmax",  # 'argmax' or 'random'
    sampl_prob=0.0,
    sampl_temp=1.0,
    sampl_warmup_steps=50000,
    self_attn_type="dot_attn",
    shared_embed=False,
    shared_weights=True,
    short_seq_prob=0.1,
    shuffle_size=512,
    split_tgts_chunk_len=0,
    split_tgts_max_chunks=100,
    split_to_len=0,
    src_len=0,
    steps_between_evals=None,
    stop_threshold=None,
    summarize_grads=False,
    summarize_vars=False,
    symbol_modality_shards=16,
    synthetic_data=None,
    target_frames=1,
    tgt_len=0,
    train_epochs=[],
    train_steps=1000,
    unidirectional_encoder=False,
    use_custom_ops=True,
    use_target_embed=True,
    warm_start_from="",
    warmup_steps=16000,
    weight_decay=1e-6,
    weight_noise=0.0,
    weights_fn={},
)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,524
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/tutorials/04-low-memory-dropout.py
|
"""
Low-Memory Dropout
==================
In this tutorial, you will write a memory-efficient implementation of dropout whose state
will be composed of a single int32 seed. This differs from more traditional implementations of dropout,
whose state is generally composed of a bit mask tensor of the same shape as the input.
In doing so, you will learn about:
* The limitations of naive implementations of Dropout with PyTorch.
* Parallel pseudo-random number generation in Triton.
"""
# %%
# Baseline
# --------
#
# The *dropout* operator was first introduced in [SRIVASTAVA2014]_ as a way to improve the performance
# of deep neural networks in low-data regime (i.e. regularization).
#
# It takes a vector as input and produces a vector of the same shape as output. Each scalar in the
# output has a probability :math:`p` of being changed to zero and otherwise it is copied from the input.
# This forces the network to perform well even when only :math:`1 - p` scalars from the input are available.
#
# At evaluation time we want to use the full power of the network so we set :math:`p=0`. Naively this would
# increase the norm of the output (which can be a bad thing, e.g. it can lead to artificial decrease
# in the output softmax temperature). To prevent this we multiply the output by :math:`\frac{1}{1 - p}`, which
# keeps the norm consistent regardless of the dropout probability.
#
# Let's first take a look at the baseline implementation.
import tabulate
import torch
import triton
import triton.language as tl
@triton.jit
def _dropout(
    x_ptr,  # pointer to the input
    x_keep_ptr,  # pointer to a mask of 0s and 1s
    output_ptr,  # pointer to the output
    n_elements,  # number of elements in the `x` tensor
    p,  # probability that an element of `x` is changed to zero
    BLOCK_SIZE: tl.constexpr,
):
    """Apply dropout with an explicit 0/1 keep mask, rescaling kept values by 1/(1-p)."""
    # Each program instance is responsible for one contiguous BLOCK_SIZE chunk.
    instance = tl.program_id(axis=0)
    base = instance * BLOCK_SIZE
    idx = base + tl.arange(0, BLOCK_SIZE)
    in_bounds = idx < n_elements
    # Load the input values and their keep/drop flags.
    vals = tl.load(x_ptr + idx, mask=in_bounds)
    keep = tl.load(x_keep_ptr + idx, mask=in_bounds)
    # Kept elements are scaled by 1/(1-p) so the expected norm is unchanged;
    # dropped elements become zero.
    result = tl.where(keep, vals / (1 - p), 0.0)
    tl.store(output_ptr + idx, result, mask=in_bounds)
def dropout(x, x_keep, p):
    """Launch the `_dropout` kernel: zero elements of `x` where `x_keep` is 0.

    Kept elements are rescaled by 1/(1-p). `x` must be contiguous.
    """
    assert x.is_contiguous()
    out = torch.empty_like(x)
    numel = x.numel()
    # One program instance per BLOCK_SIZE-sized chunk of the input.
    launch_grid = lambda meta: (triton.cdiv(numel, meta['BLOCK_SIZE']),)
    _dropout[launch_grid](x, x_keep, out, numel, p, BLOCK_SIZE=1024)
    return out
# Input tensor
x = torch.randn(size=(10,)).cuda()
# Dropout mask
p = 0.5
x_keep = (torch.rand(size=(10,)) > p).to(torch.int32).cuda()
#
output = dropout(x, x_keep=x_keep, p=p)
# Show input, mask, and output side by side.
rows = [
    ["input"] + x.tolist(),
    ["keep mask"] + x_keep.tolist(),
    ["output"] + output.tolist(),
]
print(tabulate.tabulate(rows))
# %%
# Seeded dropout
# --------------
#
# The above implementation of dropout works fine, but it can be a bit awkward to deal with. Firstly
# we need to store the dropout mask for backpropagation. Secondly, dropout state management can get
# very tricky when using recompute/checkpointing (e.g. see all the notes about `preserve_rng_state` in
# https://pytorch.org/docs/1.9.0/checkpoint.html). In this tutorial we'll describe an alternative implementation
# that (1) has a smaller memory footprint; (2) requires less data movement; and (3) simplifies the management
# of persisting randomness across multiple invocations of the kernel.
#
# Pseudo-random number generation in Triton is simple! In this tutorial we will use the
# :code:`triton.language.rand` function which generates a block of uniformly distributed :code:`float32`
# values in [0, 1), given a seed and a block of :code:`int32` offsets. But if you need it, Triton also provides
# other :ref:`random number generation strategies <Random Number Generation>`.
#
# .. note::
#     Triton's implementation of PRNG is based on the Philox algorithm (described in [SALMON2011]_).
#
# Let's put it all together.
@triton.jit
def _seeded_dropout(
    x_ptr,
    output_ptr,
    n_elements,
    p,
    seed,
    BLOCK_SIZE: tl.constexpr,
):
    """Dropout whose keep mask is regenerated from `seed` — no mask tensor is stored."""
    # Offsets covered by this program instance.
    instance = tl.program_id(axis=0)
    base = instance * BLOCK_SIZE
    offs = base + tl.arange(0, BLOCK_SIZE)
    valid = offs < n_elements
    vals = tl.load(x_ptr + offs, mask=valid)
    # tl.rand is deterministic in (seed, offset), so the same seed
    # reproduces the same mask on every invocation.
    rnd = tl.rand(seed, offs)
    keep = rnd > p
    # Rescale survivors by 1/(1-p); drop the rest.
    result = tl.where(keep, vals / (1 - p), 0.0)
    tl.store(output_ptr + offs, result, mask=valid)
def seeded_dropout(x, p, seed):
    """Launch `_seeded_dropout`: dropout on `x` with probability `p`, seeded by `seed`.

    The only dropout state is the int32 `seed`; `x` must be contiguous.
    """
    assert x.is_contiguous()
    out = torch.empty_like(x)
    numel = x.numel()
    # One program instance per BLOCK_SIZE-sized chunk.
    launch_grid = lambda meta: (triton.cdiv(numel, meta['BLOCK_SIZE']),)
    _seeded_dropout[launch_grid](x, out, numel, p, seed, BLOCK_SIZE=1024)
    return out
x = torch.randn(size=(10,)).cuda()
# Compare this to the baseline - dropout mask is never instantiated!
output = seeded_dropout(x, p=0.5, seed=123)
output2 = seeded_dropout(x, p=0.5, seed=123)
output3 = seeded_dropout(x, p=0.5, seed=512)
# Identical seeds yield identical outputs; a different seed yields a different mask.
table = [
    ["input"] + x.tolist(),
    ["output (seed = 123)"] + output.tolist(),
    ["output (seed = 123)"] + output2.tolist(),
    ["output (seed = 512)"] + output3.tolist(),
]
print(tabulate.tabulate(table))
# %%
# Et Voilà! We have a triton kernel that applies the same dropout mask provided the seed is the same!
# If you'd like to explore further applications of pseudorandomness in GPU programming, we encourage you
# to explore the `triton/language/random` folder!
# %%
# Exercises
# ---------
#
# 1. Extend the kernel to operate over a matrix and use a vector of seeds - one per row.
# 2. Add support for striding.
# 3. (challenge) Implement a kernel for sparse Johnson-Lindenstrauss transform which generates the projection matrix on the fly each time using a seed.
# %%
# References
# ----------
#
# .. [SALMON2011] John K. Salmon, Mark A. Moraes, Ron O. Dror, and David E. Shaw, "Parallel Random Numbers: As Easy as 1, 2, 3", 2011
# .. [SRIVASTAVA2014] Nitish Srivastava and Geoffrey Hinton and Alex Krizhevsky and Ilya Sutskever and Ruslan Salakhutdinov, "Dropout: A Simple Way to Prevent Neural Networks from Overfitting", JMLR 2014
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,525
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/base/doc/images.py
|
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
import pathlib as pth
from wand.image import Image
from .counter import counters
from .base import config, num_to_name, lister
class Images:
    """Base exporter that converts PDF pages into resized raster images.

    Subclasses pick the output format via ``kind`` and drive the export
    through the ``split_pdf`` / ``resize_all`` generators.
    """
    kind = 'png'
    export_args = ((('scanned', '.'), ('split', '+'), ('missing', '-'),
                    ('resized', '>'), ('failed', 'F')), 'Splitting:')

    @staticmethod
    def rename_all(path):
        """Rename every file under ``path`` to its 1-based canonical name.

        NOTE(review): if a file already carries a canonical name that a
        lexicographically earlier sibling maps onto, the rename can clobber
        it — confirm inputs never mix canonical and ad-hoc names.
        """
        for n, entry in enumerate(sorted(lister(path)), start=1):
            target = entry.with_name(num_to_name(n)).with_suffix(entry.suffix)
            entry.rename(target)

    @classmethod
    def filepath(cls, path, i):
        """Canonical output file for page ``i`` under ``path``."""
        return path / (num_to_name(i) + '.' + cls.kind)

    def __init__(self, src, dst, width=500, blur=1):
        """Remember source/destination roots and resampling parameters."""
        self.src = src
        self.dst = dst
        self.width = width
        self.blur = blur

    def resize(self, dst, img, i=1):
        """Resize ``img`` (possibly multi-page) into ``dst``, yielding each file.

        Multi-page images yield one file per page, numbered by page index;
        single images are numbered by the caller-supplied ``i``.
        """
        pages = img.sequence
        if pages and len(pages) > 1:
            for page in pages:
                n = page.index + 1
                with Image(image=page) as one:
                    one.alpha_channel = False
                    # Preserve the aspect ratio at the configured width.
                    h = int(self.width / (one.width / one.height))
                    one.resize(width=self.width, height=h, blur=self.blur)
                    out = self.filepath(dst, n)
                    one.save(filename=str(out))
                    yield out
        else:
            img.alpha_channel = False
            h = int(self.width / (img.width / img.height))
            img.resize(width=self.width, height=h, blur=self.blur)
            out = self.filepath(dst, i)
            img.save(filename=str(out))
            yield out

    def split_pdf(self, src, dst):
        """Rasterize the PDF at ``src`` (300 dpi) into images under ``dst``."""
        with Image(filename=str(src), resolution=300) as pdf, \
                pdf.convert(self.kind) as img:
            yield from self.resize(dst, img)

    def resize_all(self, src, dst):
        """Convert and resize every file under ``src`` into ``dst``."""
        names = sorted(str(p) for p in lister(src))
        for n, name in enumerate(names, start=1):
            with Image(filename=name) as raw, \
                    raw.convert(self.kind) as img:
                yield from self.resize(dst, img, n)
class Pngs(Images):
    """PNG exporter: splits each source PDF into per-page ``.slides`` PNGs."""

    def export_all(self, ctxt, **kw):
        """Split every source PDF named in ``ctxt.sources`` into slides.

        Counts: '.' already exported, '+' newly split, '-' PDF missing.
        """
        kw.update(ctxt=ctxt)
        with counters(self.export_args, kw) as cs:
            for name in ctxt.sources.keys():
                pdf = (self.src / name).with_suffix('.pdf')
                if not pdf.exists():
                    cs.incr('-')
                    continue
                slides = (self.dst / name).with_suffix('.slides')
                if slides.exists():
                    cs.incr('.')
                    continue
                slides.mkdir(parents=True, exist_ok=True)
                # Drain the generator purely for its file-writing side effect.
                for _ in self.split_pdf(pdf, slides):
                    pass
                cs.incr('+')
        return cs
class Jpgs(Images):
    """JPEG exporter for the ``pictures`` tree: PDFs are split, plain image
    directories are resized in place."""
    kind = 'jpeg'

    def __init__(self, src, dst, width=1000, blur=1):
        """Same as :class:`Images` but with a wider 1000px default."""
        super().__init__(src, dst, width, blur)

    def export_all(self, ctxt, **kw):
        """Export every picture source named in ``ctxt.sources``.

        Counts: '.' already exported, '+' PDF split, '>' directory resized,
        '-' nothing found for the name.
        """
        kw.update(ctxt=ctxt)
        pics = self.src / 'pictures'
        with counters(self.export_args, kw) as cs:
            for name in ctxt.sources.keys():
                pdf = (pics / name).with_suffix('.pdf')
                slides = (self.dst / name).with_suffix('.slides')
                if pdf.exists():
                    if slides.exists():
                        cs.incr('.')
                    else:
                        slides.mkdir(parents=True, exist_ok=True)
                        for _ in self.split_pdf(pdf, slides):
                            pass
                        cs.incr('+')
                    continue
                # No PDF: fall back to a directory of loose images.
                folder = pdf.with_suffix('')
                if folder.exists() and folder.is_dir():
                    if slides.exists():
                        cs.incr('.')
                    else:
                        slides.mkdir(parents=True, exist_ok=True)
                        for _ in self.resize_all(folder, slides):
                            pass
                        cs.incr('>')
                    continue
                cs.incr('-')
        return cs
class Orgs(Pngs):
    """PNG exporter that additionally wraps each split PDF in an Org-mode
    page and symlinks existing ``.org`` sources into the destination."""
    # Cached (prefix, suffix) pair of the Org frame template.
    # NOTE(review): a two-element tuple is truthy, so the ``not`` check in
    # frame() never fires and the template file is never read — frame()
    # always returns ('', ''). The trailing "None" hint on the original line
    # suggests the sentinel used to be None; confirm whether disabling the
    # template load was intentional.
    org_frame = ('', '')

    @classmethod
    def frame(cls):
        """Return the cached (prefix, suffix) halves of the frame template."""
        if not cls.org_frame:
            # Split the template around the content placeholder block.
            t = pth.Path(config.web_templates + 'frame.org').read_text()
            cls.org_frame = t.split(r'{% block frame_content %}')
        return cls.org_frame

    def __init__(self, src, dst, width=750, blur=1):
        # Same as Pngs but with a 750px default width.
        super().__init__(src, dst, width, blur)

    def export_all(self, ctxt, **kw):
        """Split PDFs into slides plus an ``.org`` index; link ``.org`` sources.

        Counts: '.' already exported, '+' newly split.
        """
        kw.update(ctxt=ctxt)
        with counters(self.export_args, kw) as cs:
            # Only a subset of categories is currently enabled.
            for c in ('affidavits', 'hearings'):
                # 'exhibits', 'messages',
                # 'pictures', 'reports', 'submissions', 'trials',
                # 'discoveries', 'financial', 'letters', 'orders',
                # 'services', 'transcripts'):
                for s in lister(self.src / c, suffs=('.pdf', )):
                    d = (self.dst / s.relative_to(self.src)).with_suffix('')
                    if d.exists():
                        cs.incr('.')
                    else:
                        b = d.parent
                        d.mkdir(parents=True, exist_ok=True)
                        f, e = self.frame()
                        # Append one Org image link per generated page.
                        for p in self.split_pdf(s, d):
                            p = p.relative_to(b)
                            f += '#+NAME: {}\n[[./{}]]\n'.format(
                                p.stem, str(p))
                        d.with_suffix('.org').write_text(f + e)
                        cs.incr('+')
                for s in lister(self.src / c, suffs=('.org', )):
                    d = (self.dst / s.relative_to(self.src))
                    # Replace any stale link before re-creating it.
                    try:
                        d.unlink()
                    except FileNotFoundError:
                        pass
                    d.symlink_to(os.path.relpath(s, d.parent))
        return cs
if __name__ == '__main__':
    # Canonicalize file names in the working directory and in each of its
    # immediate subdirectories.
    here = pth.Path.cwd()
    Images.rename_all(here)
    for child in here.iterdir():
        if child.is_dir():
            Images.rename_all(child)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,526
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/metric/glue.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import datasets as ds
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
class Glue(ds.Metric):
    """GLUE benchmark metric: dispatches to the scorer matching `config_name`.

    cola -> Matthews correlation; stsb -> Pearson/Spearman; mrpc/qqp ->
    accuracy + F1; every other task -> plain accuracy.
    """

    def _info(self):
        """Declare the input schema; STS-B is regression, all others labels."""
        assert self.config_name in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]
        dtype = "float32" if self.config_name == "stsb" else "int64"
        schema = ds.Features({"predictions": ds.Value(dtype), "references": ds.Value(dtype)})
        return ds.MetricInfo(
            description="",
            citation="",
            inputs_description="",
            features=schema,
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, preds, refs):
        """Score `preds` against `refs` with the task-appropriate metric(s)."""
        name = self.config_name
        if name == "stsb":
            return _pearson_and_spearman(preds, refs)
        if name == "cola":
            return {"matthews_correlation": matthews_corrcoef(refs, preds)}
        if name in ("mrpc", "qqp"):
            return _acc_and_f1(preds, refs)
        assert name in (
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "qnli",
            "rte",
            "wnli",
            "hans",
        )
        return {"accuracy": _accuracy(preds, refs)}
def _accuracy(preds, refs):
return float((preds == refs).mean())
def _acc_and_f1(preds, refs):
    """Return accuracy plus binary F1 — the scorers used for MRPC and QQP."""
    scores = {"accuracy": _accuracy(preds, refs)}
    scores["f1"] = float(f1_score(y_true=refs, y_pred=preds))
    return scores
def _pearson_and_spearman(preds, refs):
p = float(pearsonr(preds, refs)[0])
s = float(spearmanr(preds, refs)[0])
return {"pearson": p, "spearmanr": s}
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,527
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/models/yoso.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import math
import os
import torch
import torch.utils.checkpoint
from torch.nn import functional as F
from packaging import version
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers.utils import logging
from .. import core as qc
from ..core import utils as qu
from ..core import output as qo
from ..core import forward as qf
from ..core import attention as qa
from ..core.embed import Embed
from ..core.mlp import Classifier, MLP, Predictor, Pool
from ..prep.config.yoso import PreTrained
from ...pytorch_utils import (
apply_chunking_to_forward,
)
log = logging.get_logger(__name__)
# Known pretrained YOSO checkpoint identifiers (Hugging Face hub names).
LIST = [
    "uw-madison/yoso-4096",
]
def load_cuda_kernels():
    """Compile and import the fast-LSH CUDA extension.

    On success binds the extension module to the global `lsh_cumulation`
    and returns True; on any failure resets the global to None and
    returns False so callers can fall back to the pure-torch path.
    """
    global lsh_cumulation
    try:
        from torch.utils.cpp_extension import load

        # Kernel sources live next to this file.
        here = os.path.dirname(os.path.realpath(__file__))
        sources = [
            os.path.join(here, name)
            for name in (
                "fast_lsh_cumulation_torch.cpp",
                "fast_lsh_cumulation.cu",
                "fast_lsh_cumulation_cuda.cu",
            )
        ]
        load("fast_lsh_cumulation", sources, verbose=True)
        import fast_lsh_cumulation as lsh_cumulation

        return True
    except Exception:
        lsh_cumulation = None
        return False
def to_contiguous(xs):
    """Return `xs` with every tensor guaranteed contiguous.

    Accepts a single tensor or a list of tensors; tensors that are
    already contiguous are passed through unchanged (same object).
    """
    if not isinstance(xs, list):
        return xs if xs.is_contiguous() else xs.contiguous()
    return [x if x.is_contiguous() else x.contiguous() for x in xs]
def normalize(xs):
    """L2-normalize along the last axis; accepts a tensor or a list of tensors."""
    if type(xs) is not list:
        return F.normalize(xs, p=2, dim=-1)
    return [F.normalize(x, p=2, dim=-1) for x in xs]
def hashing(q, k, num_hash, hash_len):
    """Pure-torch LSH hashing fallback for query/key tensors.

    Projects `q` and `k` (each of shape (batch, seq, dim)) through the
    same random matrix, takes sign bits, and packs each group of
    `hash_len` bits into an integer hash code.

    Returns (query_hash, key_hash), each int32 of shape
    (batch, seq, num_hash) with values in [0, 2**hash_len).
    Raises ValueError when either input is not rank-3.
    NOTE: a fresh random projection is drawn on every call (no seeding here).
    """
    if len(q.size()) != 3:
        raise ValueError("Query has incorrect size.")
    if len(k.size()) != 3:
        raise ValueError("Key has incorrect size.")
    rmat = torch.randn(q.size(0), q.size(2), num_hash * hash_len, device=q.device)
    # Powers of two used to pack the sign bits into one integer per hash.
    raise_pow = 2 ** torch.arange(hash_len, device=q.device)
    q = torch.matmul(q, rmat).reshape(q.size(0), q.size(1), num_hash, hash_len)
    k = torch.matmul(k, rmat).reshape(k.size(0), k.size(1), num_hash, hash_len)
    q = (q > 0).int()
    k = (k > 0).int()
    # BUG FIX: the original assigned both sums to the same variable `y`,
    # so the query hash was overwritten and the key hash returned twice.
    q_hash = torch.sum(q * raise_pow, dim=-1)
    k_hash = torch.sum(k * raise_pow, dim=-1)
    return q_hash.int(), k_hash.int()
class YosoCumulation(torch.autograd.Function):
    """Exact "expectation" attention used when `use_expectation` is enabled.

    Forward computes softmax-free attention weights from the angular
    similarity of the (pre-normalized) queries and keys,
        E = (1 - arccos(q @ k^T) / pi) ** hash_code_len,
    masks them with the query/key masks, and applies them to `value`.
    Backward returns hand-derived gradients for query, key, and value.
    """

    @staticmethod
    def forward(ctx, query_mask, key_mask, query, key, value, config):
        # `config` is a plain dict (see YosoSelfAttention.lsh_config);
        # only "hash_code_len" is read on this path.
        hash_code_len = config["hash_code_len"]
        expectation = (
            1 - torch.acos(torch.matmul(query, key.transpose(-1, -2))) / math.pi
        ) ** hash_code_len
        # Zero out weights for masked query rows and masked key columns.
        expectation = expectation * query_mask[:, :, None] * key_mask[:, None, :]
        cumulation_value = torch.matmul(expectation, value)
        # Save everything backward needs; config is not a tensor, so it
        # rides on ctx directly rather than through save_for_backward.
        ctx.save_for_backward(query_mask, key_mask, expectation, query, key, value)
        ctx.config = config
        return cumulation_value

    @staticmethod
    def backward(ctx, grad):
        grad = to_contiguous(grad)
        query_mask, key_mask, expectation, query, key, value = ctx.saved_tensors
        config = ctx.config
        hash_code_len = config["hash_code_len"]
        weighted_exp = torch.matmul(grad, value.transpose(-1, -2)) * expectation
        # NOTE(review): the (hash_code_len / 2) factor looks like a
        # linearized approximation of d(expectation)/d(q.k) — confirm
        # against the YOSO paper's gradient derivation.
        grad_query = torch.matmul(weighted_exp, (hash_code_len / 2) * key)
        grad_key = torch.matmul(weighted_exp.transpose(-1, -2), (hash_code_len / 2) * query)
        grad_value = torch.matmul(expectation.transpose(-1, -2), grad)
        # None entries match the non-differentiable inputs (masks, config).
        return None, None, grad_query, grad_key, grad_value, None
class YosoLSHCumulation(torch.autograd.Function):
    """LSH-sampled attention backed by the compiled `lsh_cumulation` kernels.

    Forward hashes queries and keys (fast CUDA hash or the pure-torch
    `hashing` fallback) and accumulates `value` rows whose key hash
    collides with the query hash.  Requires `load_cuda_kernels()` to
    have succeeded, i.e. the module-global `lsh_cumulation` to be set.
    """

    @staticmethod
    def forward(ctx, query_mask, key_mask, query, key, value, config):
        # Shape validation: all tensors must agree on batch size, key and
        # value on sequence length, query and key on feature dimension.
        if query_mask.size(0) != key_mask.size(0):
            raise ValueError("Query mask and Key mask differ in sizes in dimension 0")
        if query_mask.size(0) != query.size(0):
            raise ValueError("Query mask and Query differ in sizes in dimension 0")
        if query_mask.size(0) != key.size(0):
            raise ValueError("Query mask and Key differ in sizes in dimension 0")
        if query_mask.size(0) != value.size(0):
            raise ValueError("Query mask and Value mask differ in sizes in dimension 0")
        if key.size(1) != value.size(1):
            raise ValueError("Key and Value differ in sizes in dimension 1")
        if query.size(2) != key.size(2):
            raise ValueError("Query and Key differ in sizes in dimension 2")
        # The CUDA kernels require contiguous inputs.
        query_mask, key_mask, query, key, value = to_contiguous(
            [query_mask, key_mask, query, key, value]
        )
        use_cuda = query_mask.is_cuda
        num_hash = config["num_hash"]
        hash_code_len = config["hash_code_len"]
        # Each hash code indexes a table of 2**hash_code_len buckets.
        hashtable_capacity = int(2**hash_code_len)
        if config["use_fast_hash"]:
            # NOTE(review): trailing int argument appears to select a
            # kernel variant — semantics live in the CUDA extension; confirm.
            query_hash_code, key_hash_code = lsh_cumulation.fast_hash(
                query_mask, query, key_mask, key, num_hash, hash_code_len, use_cuda, 1
            )
        else:
            query_hash_code, key_hash_code = hashing(query, key, num_hash, hash_code_len)
        cumulation_value = lsh_cumulation.lsh_cumulation(
            query_mask,
            query_hash_code,
            key_mask,
            key_hash_code,
            value,
            hashtable_capacity,
            use_cuda,
            1,
        )
        # config is a plain dict, so it rides on ctx instead of save_for_backward.
        ctx.save_for_backward(
            query_mask, key_mask, query_hash_code, key_hash_code, query, key, value
        )
        ctx.config = config
        return cumulation_value

    @staticmethod
    def backward(ctx, grad):
        grad = to_contiguous(grad)
        query_mask, key_mask, query_hash_code, key_hash_code, query, key, value = ctx.saved_tensors
        config = ctx.config
        use_cuda = grad.is_cuda
        hash_code_len = config["hash_code_len"]
        hashtable_capacity = int(2**hash_code_len)
        if config["lsh_backward"]:
            # Approximate gradients via the same LSH kernels; query/key
            # roles are swapped to propagate grad back through the table.
            grad_value = lsh_cumulation.lsh_cumulation(
                key_mask,
                key_hash_code,
                query_mask,
                query_hash_code,
                grad,
                hashtable_capacity,
                use_cuda,
                1,
            )
            grad_query = lsh_cumulation.lsh_weighted_cumulation(
                query_mask,
                query_hash_code,
                grad,
                key_mask,
                key_hash_code,
                value,
                (hash_code_len / 2) * key,
                hashtable_capacity,
                use_cuda,
                4,
            )
            grad_key = lsh_cumulation.lsh_weighted_cumulation(
                key_mask,
                key_hash_code,
                value,
                query_mask,
                query_hash_code,
                grad,
                (hash_code_len / 2) * query,
                hashtable_capacity,
                use_cuda,
                4,
            )
        else:
            # Exact-gradient fallback: same analytic expressions as
            # YosoCumulation.backward, recomputing the expectation matrix.
            expectation = (
                1 - torch.acos(torch.matmul(query, key.transpose(-1, -2))) / math.pi
            ) ** hash_code_len
            expectation = expectation * query_mask[:, :, None] * key_mask[:, None, :]
            weighted_exp = torch.matmul(grad, value.transpose(-1, -2)) * expectation
            grad_query = torch.matmul(weighted_exp, (hash_code_len / 2) * key)
            grad_key = torch.matmul(weighted_exp.transpose(-1, -2), (hash_code_len / 2) * query)
            grad_value = torch.matmul(expectation.transpose(-1, -2), grad)
        # None entries match the non-differentiable inputs (masks, config).
        return None, None, grad_query, grad_key, grad_value, None
# Copied from transformers.models.nystromformer.modeling_nystromformer.NystromformerEmbeddings
class YosoEmbeddings(qc.Module):
    """Token + position + token-type embeddings for YOSO.

    Position ids are offset by 2 and the position table has n_pos + 2
    rows — presumably mirroring the padding-idx offset convention of
    RoBERTa-style embeddings; TODO confirm against the checkpoint.
    """

    def __init__(self, config):
        super().__init__()
        self.tok = qc.Embed(config.s_vocab, config.d_model, padding_idx=config.PAD)
        self.pos = qc.Embed(config.n_pos + 2, config.d_model)
        self.typ = qc.Embed(config.n_typ, config.d_model)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)
        # Precomputed [1, n_pos] position ids, shifted by the +2 offset.
        self.register_buffer("position_ids", torch.arange(config.n_pos).expand((1, -1)) + 2)
        self.pos_type = getattr(config, "pos_type", "absolute")
        # Non-persistent buffers require torch > 1.6.0.
        if version.parse(torch.__version__) > version.parse("1.6.0"):
            self.register_buffer(
                "token_type_ids",
                torch.zeros(
                    self.position_ids.size(), dtype=torch.long, device=self.position_ids.device
                ),
                persistent=False,
            )

    def forward(self, x=None, typ=None, pos=None, inputs_embeds=None):
        """Embed ids (`x`) or precomputed `inputs_embeds`; returns (B, L, d_model)."""
        if x is not None:
            s = x.size()
        else:
            s = inputs_embeds.size()[:-1]
        seq_length = s[1]
        if pos is None:
            pos = self.position_ids[:, :seq_length]
        if typ is None:
            # Default token types come from the registered all-zeros buffer
            # when available (torch > 1.6.0), otherwise a fresh zeros tensor.
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(s[0], seq_length)
                typ = buffered_token_type_ids_expanded
            else:
                typ = torch.zeros(s, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.tok(x)
        token_type_embeddings = self.typ(typ)
        embeddings = inputs_embeds + token_type_embeddings
        # Absolute position embeddings are added only for pos_type "absolute".
        if self.pos_type == "absolute":
            position_embeddings = self.pos(pos)
            embeddings += position_embeddings
        embeddings = self.norm(embeddings)
        embeddings = self.drop(embeddings)
        return embeddings
class YosoSelfAttention(qc.Module):
    def __init__(self, config, pos_type=None):
        """Set up Q/K/V projections and LSH configuration from `config`.

        `pos_type` overrides `config.pos_type` when given.  Raises
        ValueError when d_model is not divisible by n_heads (unless the
        config defines `d_embed`).
        """
        super().__init__()
        if config.d_model % config.n_heads != 0 and not hasattr(config, "d_embed"):
            raise ValueError(
                f"The hidden size ({config.d_model}) is not a multiple of the number of attention "
                f"heads ({config.n_heads})"
            )
        self.n_heads = config.n_heads
        self.attention_head_size = int(config.d_model / config.n_heads)
        self.all_head_size = self.n_heads * self.attention_head_size
        # Linear projections over the concatenated head dimension.
        self.query = qc.Linear(config.d_model, self.all_head_size)
        self.key = qc.Linear(config.d_model, self.all_head_size)
        self.value = qc.Linear(config.d_model, self.all_head_size)
        self.drop = qc.Dropout(config.drop_attn)
        self.pos_type = pos_type if pos_type is not None else config.pos_type
        self.use_expectation = config.use_expectation
        self.hash_code_len = config.hash_code_len
        self.use_conv = config.conv_window is not None
        self.use_fast_hash = config.use_fast_hash
        self.num_hash = config.num_hash
        self.lsh_backward = config.lsh_backward
        # Plain-dict config consumed by YosoCumulation / YosoLSHCumulation.
        self.lsh_config = {
            "hash_code_len": self.hash_code_len,
            "use_fast_hash": self.use_fast_hash,
            "num_hash": self.num_hash,
            "lsh_backward": self.lsh_backward,
        }
        if config.conv_window is not None:
            # Depthwise (groups=n_heads) conv over the sequence axis,
            # padded to preserve sequence length.
            self.conv = nn.Conv2d(
                in_channels=config.n_heads,
                out_channels=config.n_heads,
                kernel_size=(config.conv_window, 1),
                padding=(config.conv_window // 2, 0),
                bias=False,
                groups=config.n_heads,
            )
    def transpose_for_scores(self, layer):
        """Reshape (batch, seq, all_head_size) -> (batch, n_heads, seq, head_size)."""
        new_layer_shape = layer.size()[:-1] + (self.n_heads, self.attention_head_size)
        layer = layer.view(*new_layer_shape)
        return layer.permute(0, 2, 1, 3)
def forward(self, hiddens, attention_mask=None, output_attentions=False):
mixed_query_layer = self.query(hiddens)
key_layer = self.transpose_for_scores(self.key(hiddens))
value_layer = self.transpose_for_scores(self.value(hiddens))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.use_conv:
conv_value_layer = self.conv(value_layer * attention_mask[:, None, :, None])
batch_size, n_heads, seq_len, head_dim = query_layer.size()
query_layer = query_layer.reshape(batch_size * n_heads, seq_len, head_dim)
key_layer = key_layer.reshape(batch_size * n_heads, seq_len, head_dim)
value_layer = value_layer.reshape(batch_size * n_heads, seq_len, head_dim)
# revert changes made by get_extended_attention_mask
attention_mask = 1.0 + attention_mask / 10000.0
attention_mask = (
attention_mask.squeeze()
.repeat(1, n_heads, 1)
.reshape(batch_size * n_heads, seq_len)
.int()
)
# The CUDA kernels are most efficient with inputs whose size is a multiple of a GPU's warp size (32). Inputs
# smaller than this are padded with zeros.
gpu_warp_size = 32
if (not self.use_expectation) and head_dim < gpu_warp_size:
pad_size = batch_size * n_heads, seq_len, gpu_warp_size - head_dim
query_layer = torch.cat(
[
query_layer,
torch.zeros(pad_size, device=query_layer.device),
],
dim=-1,
)
key_layer = torch.cat(
[
key_layer,
torch.zeros(pad_size, device=key_layer.device),
],
dim=-1,
)
value_layer = torch.cat(
[
value_layer,
torch.zeros(pad_size, device=value_layer.device),
],
dim=-1,
)
if self.use_expectation or self.training:
query_layer, key_layer = normalize([query_layer, key_layer])
if self.use_expectation:
context_layer = YosoCumulation.apply(
attention_mask, attention_mask, query_layer, key_layer, value_layer, self.lsh_config
)
else:
context_layer = YosoLSHCumulation.apply(
attention_mask, attention_mask, query_layer, key_layer, value_layer, self.lsh_config
)
if (not self.use_expectation) and head_dim < gpu_warp_size:
context_layer = context_layer[:, :, :head_dim]
context_layer = normalize(context_layer)
context_layer = context_layer.reshape(batch_size, n_heads, seq_len, head_dim)
if self.use_conv:
context_layer += conv_value_layer
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, context_layer) if output_attentions else (context_layer,)
return outputs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class YosoSelfOutput(qc.Module):
    """Post-attention projection: dense -> dropout -> residual add -> LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = qc.Linear(config.d_model, config.d_model)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)

    def forward(self, hiddens, input_tensor):
        projected = self.drop(self.dense(hiddens))
        # residual connection followed by layer normalization
        return self.norm(projected + input_tensor)
class Attention(qc.Module):
    """Full attention sub-layer: YOSO self-attention plus the output block."""

    def __init__(self, config, pos_type=None):
        super().__init__()
        self.self = YosoSelfAttention(config, pos_type=pos_type)
        self.output = YosoSelfOutput(config)

    def forward(self, hiddens, attention_mask=None, output_attentions=False):
        attn = self.self(hiddens, attention_mask, output_attentions)
        y = self.output(attn[0], hiddens)
        # forward any extra tensors (attention maps) produced by the inner module
        return (y,) + attn[1:]
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class YosoIntermediate(qc.Module):
    """Feed-forward expansion half: linear d_model -> d_ff with activation."""

    def __init__(self, cfg):
        super().__init__()
        self.dense = qc.Linear(cfg.d_model, cfg.d_ff)
        self.act = qu.activation(cfg.act)

    def forward(self, x):
        return self.act(self.dense(x))
# Copied from transformers.models.bert.modeling_bert.BertOutput
class YosoOutput(qc.Module):
    """Feed-forward contraction half: d_ff -> d_model, residual add, LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = qc.Linear(config.d_ff, config.d_model)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)

    def forward(self, hiddens, input_tensor):
        contracted = self.drop(self.dense(hiddens))
        # residual connection followed by layer normalization
        return self.norm(contracted + input_tensor)
class Layer(qc.Module):
    """One encoder layer: attention sub-layer, then a (chunked) feed-forward block."""

    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = Attention(config)
        self.add_cross_attention = config.add_cross_attention
        self.intermediate = YosoIntermediate(config)
        self.output = YosoOutput(config)

    def forward(self, hiddens, attention_mask=None, output_attentions=False):
        attn = self.attention(hiddens, attention_mask, output_attentions=output_attentions)
        # feed-forward is applied in sequence-length chunks to bound memory use
        ffn_out = apply_chunking_to_forward(
            self.feed_forward_chunk,
            self.chunk_size_feed_forward,
            self.seq_len_dim,
            attn[0],
        )
        # attn[1:] carries the optional attention maps
        return (ffn_out,) + attn[1:]

    def feed_forward_chunk(self, attention_output):
        # expansion + contraction with residual, on one chunk of the sequence
        return self.output(self.intermediate(attention_output), attention_output)
class Encoder(qc.Module):
    """Stack of YOSO layers, optionally collecting hidden states and attentions."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([Layer(config) for _ in range(config.n_lays)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        hiddens_acc = () if output_hidden_states else None
        attns_acc = () if output_attentions else None
        for lay in self.layer:
            if output_hidden_states:
                # record the input to each layer
                hiddens_acc = hiddens_acc + (hiddens,)
            if self.gradient_checkpointing and self.training:
                # wrap the layer so the checkpointed re-run keeps the same flag
                def make_runner(mod):
                    def runner(*xs):
                        return mod(*xs, output_attentions)

                    return runner

                outs = torch.utils.checkpoint.checkpoint(
                    make_runner(lay), hiddens, attention_mask
                )
            else:
                outs = lay(hiddens, attention_mask, output_attentions)
            hiddens = outs[0]
            if output_attentions:
                attns_acc = attns_acc + (outs[1],)
        if output_hidden_states:
            # include the final layer's output as well
            hiddens_acc = hiddens_acc + (hiddens,)
        if not return_dict:
            return tuple(x for x in (hiddens, hiddens_acc, attns_acc) if x is not None)
        return qo.BaseWithCrossAttentions(
            y=hiddens,
            hiddens=hiddens_acc,
            attns=attns_acc,
        )
class Model(PreTrained):
    # Bare YOSO transformer: embeddings followed by the encoder stack, no head.
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embeddings = YosoEmbeddings(config)
        self.encoder = Encoder(config)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Embed the inputs and run the encoder.

        Exactly one of `input_ids` / `inputs_embeds` must be provided; a
        missing `attention_mask` defaults to all-ones and a missing
        `token_type_ids` to zeros (or the embeddings' registered buffer).
        """
        # fall back to config-level defaults for the output flags
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            # drop the trailing embedding dim to get (batch, seq)
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            # default mask: attend to every position
            attention_mask = torch.ones(((batch_size, seq_length)), device=device)
        if token_type_ids is None:
            # prefer the embeddings' registered buffer when it exists,
            # otherwise use all zeros
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(
                    batch_size, seq_length
                )
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        extended_attention_mask = self.get_extended_attention_mask(
            attention_mask, input_shape, device
        )
        head_mask = self.get_head_mask(head_mask, self.config.n_lays)
        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]
        # NOTE(review): Encoder never sets `crosses`; this assumes the output
        # dataclass defaults it (likely None) — confirm
        return qo.BaseWithCrossAttentions(
            y=sequence_output,
            hiddens=encoder_outputs.hiddens,
            attns=encoder_outputs.attns,
            crosses=encoder_outputs.crosses,
        )
class ForMasked(PreTrained):
    """Masked-language-modeling head on top of the base YOSO model."""

    def __init__(self, **kw):
        super().__init__(**kw)
        config = self.get_cfg(kw)
        self.model = Model(**kw)
        # predicts a vocabulary distribution from each hidden state
        self.proj = Predictor(config.d_model, **kw)

    forward = qf.forward_masked
class ForChoice(PreTrained):
    # Multiple-choice head: scores each candidate with a linear layer over the
    # first-token hidden state, then cross-entropy across the choices.
    def __init__(self, config):
        super().__init__(config)
        self.yoso = Model(config)
        self.pre_classifier = qc.Linear(config.d_model, config.d_model)
        self.classifier = qc.Linear(config.d_model, 1)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Score `num_choices` candidates per example; optional CE loss vs `labels`."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # inputs arrive as (batch, num_choices, seq_len); fold the choice dim
        # into the batch dim for a single forward pass
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = (
            attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        )
        token_type_ids = (
            token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        )
        position_ids = (
            position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        )
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        outputs = self.yoso(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_state = outputs[0]  # (bs * num_choices, seq_len, dim)
        pooled_output = hidden_state[:, 0]  # (bs * num_choices, dim)
        pooled_output = self.pre_classifier(pooled_output)  # (bs * num_choices, dim)
        pooled_output = nn.ReLU()(pooled_output)  # (bs * num_choices, dim)
        logits = self.classifier(pooled_output)
        # un-fold: one logit per choice per example
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return qo.WithLoss(
            loss=loss,
            logits=reshaped_logits,
            hiddens=outputs.hiddens,
            attns=outputs.attns,
        )
class ForSeqClass(PreTrained):
    """Sequence-level classification head on top of the base YOSO model."""

    def __init__(self, **kw):
        super().__init__(**kw)
        config = self.get_cfg(kw)
        self.model = Model(**kw)
        self.proj = Classifier(config.d_model, **kw)

    forward = qf.forward_seq
class ForTokClass(PreTrained):
    """Per-token classification head on top of the base YOSO model."""

    def __init__(self, **kw):
        super().__init__(**kw)
        # normalize kw in place; the config object itself is not needed here
        self.get_cfg(kw)
        self.model = Model(**kw)
        self.proj = Classifier(**kw)

    forward = qf.forward_tok
class ForQA(PreTrained):
    """Span-prediction (QA) head: projects hidden states to start/end logits."""

    def __init__(self, **kw):
        # QA always predicts exactly two labels: span start and span end
        kw.update(n_labels=2)
        super().__init__(**kw)
        config = self.get_cfg(kw)
        self.model = Model(**kw)
        self.proj = qc.Linear(config.d_model, config.n_labels, **kw)

    forward = qf.forward_qa
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,528
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/test/unit/language/test_subprocess.py
|
import os
import subprocess
import sys
import pytest
# Directory containing this test module; the helper scripts live next to it.
dir_path = os.path.dirname(os.path.realpath(__file__))
# Helper scripts are run in fresh subprocesses so their stdout/stderr can be
# captured cleanly, isolated from the pytest process.
print_path = os.path.join(dir_path, "print_helper.py")
assert_path = os.path.join(dir_path, "assert_helper.py")
# TODO: bfloat16 after LLVM-15
# Assert variants exercised by test_assert.
func_types = ["device_assert", "assert", "static_assert"]
# Torch dtypes exercised by the device_print parametrization of test_print.
torch_types = ["int8", "uint8", "int16", "int32", "long", "float16", "float32", "float64"]
@pytest.mark.parametrize("func_type, data_type",
                         [("device_print", data_type) for data_type in torch_types] + [("print", "int32"), ("static_print", "int32")])
def test_print(func_type: str, data_type: str):
    """Run print_helper.py in a subprocess and validate its stdout tokens.

    For device/host prints, every lane value 0..127 must appear exactly once;
    for static_print a single (compile-time) token is expected.
    """
    helper = subprocess.Popen([sys.executable, print_path, func_type, data_type],
                              stdout=subprocess.PIPE, shell=False)
    raw, _ = helper.communicate()
    seen = set()
    for token in raw.split():
        try:
            if func_type == "static_print":
                seen.add(token)
            else:
                # Parse via float first so e.g. "3.0" also round-trips to int.
                seen.add(int(float(token)))
        except Exception as e:
            print(e)
    if func_type == "static_print":
        assert len(seen) == 1
    else:
        # Exactly the values 0..127, each present, nothing else.
        assert seen == set(range(128))
@pytest.mark.parametrize("func_type", func_types)
def test_assert(func_type: str):
    """Run assert_helper.py with TRITON_DEBUG=1 and count assert messages.

    Runtime asserts are expected to report "x != 0" for 127 of the 128 lanes;
    static_assert fails (or passes) at compile time and emits no such runtime
    stderr lines.
    """
    # BUGFIX: the original set TRITON_DEBUG="1" and reset it to "0" only on the
    # success path, so a failing assertion leaked TRITON_DEBUG=1 into every
    # later test; it also clobbered any pre-existing value. Save and restore
    # the previous value in a finally block instead.
    prev = os.environ.get("TRITON_DEBUG")
    os.environ["TRITON_DEBUG"] = "1"
    try:
        proc = subprocess.Popen([sys.executable, assert_path, func_type],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
        _, errs = proc.communicate()
    finally:
        if prev is None:
            os.environ.pop("TRITON_DEBUG", None)
        else:
            os.environ["TRITON_DEBUG"] = prev
    num_errs = 0
    for err in errs.splitlines():
        if "x != 0" in err.decode("utf-8"):
            num_errs += 1
    if func_type != "static_assert":
        assert num_errs == 127
    else:
        assert num_errs == 0
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,529
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/models/canine.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import copy
import math
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import functional as F
from transformers.utils import logging
from .. import core as qc
from ..core import utils as qu
from ..core import forward as qf
from ..core import output as qo
from ..core import attention as qa
from ..core.embed import Embed
from ..core.mlp import Classifier, MLP, Predictor, Pool
from ..prep.config.canine import PreTrained
log = logging.get_logger(__name__)
class ForChoice(PreTrained):
    """CANINE model with a multiple-choice classification head.

    Each of the ``num_choices`` candidate sequences is encoded independently;
    the pooled output of each is projected to a single score, and the scores
    are regrouped to ``(batch, num_choices)`` for a cross-entropy loss over
    choices.
    """

    def __init__(self, config):
        super().__init__(config)
        self.canine = Model(config)
        self.drop = qc.Dropout(config.drop)
        # One score per flattened sequence; regrouped across choices in forward.
        self.classifier = qc.Linear(config.d_model, 1)
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        # Flatten (batch, num_choices, seq) -> (batch * num_choices, seq) so
        # the encoder treats each candidate as an independent sequence.
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = (
            attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        )
        token_type_ids = (
            token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        )
        position_ids = (
            position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        )
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        outputs = self.canine(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        pooled_output = outputs[1]
        pooled_output = self.drop(pooled_output)
        logits = self.classifier(pooled_output)
        # Regroup flat per-sequence scores back into one row per example.
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            # BUGFIX: `CrossEntropyLoss` was referenced unqualified but is not
            # imported anywhere in this module (only `nn` and `F` are), so the
            # original raised NameError whenever labels were provided.
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return qo.WithLoss(
            loss=loss,
            logits=reshaped_logits,
            hiddens=outputs.hiddens,
            attns=outputs.attns,
        )
class ForQA(PreTrained):
    # CANINE model with a linear span head for extractive question answering.
    def __init__(self, **kw):
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        self.model = Model(**kw)
        # Projects hidden states to n_labels logits (presumably start/end span
        # logits consumed by qf.forward_qa — TODO confirm against that helper).
        self.proj = qc.Linear(cfg.d_model, cfg.n_labels, **kw)

    # Shared QA forward implementation from the core forward helpers.
    forward = qf.forward_qa
class ForSeqClass(PreTrained):
    # CANINE model with a sequence-level classification head.
    def __init__(self, **kw):
        super().__init__(**kw)
        self.get_cfg(kw)
        self.model = Model(**kw)
        self.proj = Classifier(**kw)

    # Shared sequence-classification forward from the core forward helpers.
    forward = qf.forward_seq
class ForTokClass(PreTrained):
    # CANINE model with a per-token classification head (e.g. tagging tasks).
    def __init__(self, **kw):
        super().__init__(**kw)
        self.get_cfg(kw)
        self.model = Model(**kw)
        self.proj = Classifier(**kw)

    # Shared token-classification forward from the core forward helpers.
    forward = qf.forward_tok
class CaninePredictionHeadTransform(qc.Module):
    """Dense -> activation -> LayerNorm transform applied before the decoder."""

    def __init__(self, cfg):
        super().__init__()
        self.dense = qc.Linear(cfg.d_model, cfg.d_model)
        self.act = qu.activation(cfg.act)
        self.norm = qc.LayerNorm(cfg.d_model, eps=cfg.eps)

    def forward(self, x):
        # Same three-stage pipeline as before, expressed as one chained call.
        return self.norm(self.act(self.dense(x)))
class LMHead(qc.Module):
    """Masked-LM head: feature transform followed by a vocabulary projection.

    The projection bias is held as a separate Parameter and assigned onto the
    bias-free decoder linear.
    """

    def __init__(self, config):
        super().__init__()
        self.transform = CaninePredictionHeadTransform(config)
        self.decoder = qc.Linear(config.d_model, config.s_vocab, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.s_vocab))
        self.decoder.bias = self.bias

    def forward(self, x):
        # transform -> decode, collapsed into a single expression.
        return self.decoder(self.transform(x))
class CanineOnlyMLMHead(qc.Module):
    """Thin wrapper exposing the MLM head under the `predictions` attribute."""

    def __init__(self, config):
        super().__init__()
        self.predictions = LMHead(config)

    def forward(self, sequence_output):
        # Delegate straight to the prediction head.
        return self.predictions(sequence_output)
class Model(PreTrained):
    """CANINE encoder stack operating directly on character sequences.

    Pipeline: character embeddings -> shallow local char encoder ->
    downsample chars to "molecules" -> deep transformer encoder -> upsample
    (repeat) molecules back to char length -> conv projection -> shallow
    final char encoder. The shallow encoders reuse the config with
    ``n_lays`` forced to 1.
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        # Single-layer copy of the config for the two shallow char encoders.
        shallow_config = copy.deepcopy(config)
        shallow_config.n_lays = 1
        self.char_embeddings = Embed(config)
        # Local (chunked) attention over raw characters; chunk widths/strides
        # all come from config.local_transformer_stride.
        self.initial_char_encoder = Encoder(
            shallow_config,
            local=True,
            always_attend_to_first_position=False,
            first_position_attends_to_all=False,
            attend_from_chunk_width=config.local_transformer_stride,
            attend_from_chunk_stride=config.local_transformer_stride,
            attend_to_chunk_width=config.local_transformer_stride,
            attend_to_chunk_stride=config.local_transformer_stride,
        )
        self.chars_to_molecules = CharactersToMolecules(config)
        # Deep encoder operating on the downsampled molecule sequence.
        self.encoder = Encoder(config)
        self.projection = ConvProjection(config)
        self.final_char_encoder = Encoder(shallow_config)
        self.pool = Pool(config) if add_pooling_layer else None
        self.post_init()

    def _create_3d_attention_mask_from_input_mask(self, from_tensor, to_mask):
        """Broadcast a 2D padding mask to (batch, from_seq, to_seq) floats."""
        batch_size, from_seq_length = from_tensor.shape[0], from_tensor.shape[1]
        to_seq_length = to_mask.shape[1]
        to_mask = torch.reshape(to_mask, (batch_size, 1, to_seq_length)).float()
        # Ones column vector times the mask row vector broadcasts to 3D.
        broadcast_ones = torch.ones(
            size=(batch_size, from_seq_length, 1), dtype=torch.float32, device=to_mask.device
        )
        mask = broadcast_ones * to_mask
        return mask

    def _downsample_attention_mask(self, char_attention_mask, downsampling_rate):
        """Max-pool the char-level mask down to molecule resolution.

        Max-pooling keeps a molecule "attended" if any of its characters is.
        """
        batch_size, char_seq_len = char_attention_mask.shape
        poolable_char_mask = torch.reshape(char_attention_mask, (batch_size, 1, char_seq_len))
        pooled_molecule_mask = torch.nn.MaxPool1d(
            kernel_size=downsampling_rate, stride=downsampling_rate
        )(poolable_char_mask.float())
        # NOTE(review): squeeze over dim=-1 here looks like it targets the
        # pooled length dim rather than the channel dim inserted above —
        # confirm against the intended mask shape.
        molecule_attention_mask = torch.squeeze(pooled_molecule_mask, dim=-1)
        return molecule_attention_mask

    def _repeat_molecules(self, molecules, char_seq_length):
        """Upsample molecule features back to character length via repetition."""
        rate = self.config.downsampling_rate
        # Drop the leading (extra CLS) molecule before uniform repetition.
        molecules_without_extra_cls = molecules[:, 1:, :]
        repeated = torch.repeat_interleave(molecules_without_extra_cls, repeats=rate, dim=-2)
        # The last molecule also covers the remainder chars when char length
        # is not an exact multiple of the downsampling rate.
        last_molecule = molecules[:, -1:, :]
        remainder_length = torch.fmod(torch.tensor(char_seq_length), torch.tensor(rate)).item()
        remainder_repeated = torch.repeat_interleave(
            last_molecule,
            # +1 molecule to compensate for truncation.
            repeats=remainder_length + rate,
            dim=-2,
        )
        # `repeated`: [batch_size, char_seq_len, molecule_hidden_size]
        return torch.cat([repeated, remainder_repeated], dim=-2)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Encode characters and return per-char hidden states (+ pooled).

        Returns a `qo.WithPools` when return_dict, else a tuple of
        (sequence_output, pooled_output[, hiddens][, attns]).
        """
        # Fall back to config defaults for the output switches.
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Exactly one of input_ids / inputs_embeds must be given.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # Default: attend everywhere, single token type.
        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length)), device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # Char-resolution additive mask for the final char encoder.
        extended_attention_mask = self.get_extended_attention_mask(
            attention_mask, input_shape, device
        )
        # Molecule-resolution mask for the deep encoder.
        molecule_attention_mask = self._downsample_attention_mask(
            attention_mask, downsampling_rate=self.config.downsampling_rate
        )
        extended_molecule_attention_mask = self.get_extended_attention_mask(
            molecule_attention_mask, (batch_size, molecule_attention_mask.shape[-1]), device
        )
        head_mask = self.get_head_mask(head_mask, self.config.n_lays)
        input_char_embeddings = self.char_embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
        )
        # 3D mask for the local (chunked) initial char encoder.
        char_attention_mask = self._create_3d_attention_mask_from_input_mask(
            input_ids, attention_mask
        )
        init_chars_encoder_outputs = self.initial_char_encoder(
            input_char_embeddings,
            attention_mask=char_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        input_char_encoding = init_chars_encoder_outputs.y
        # Downsample characters to molecules and run the deep encoder.
        init_molecule_encoding = self.chars_to_molecules(input_char_encoding)
        encoder_outputs = self.encoder(
            init_molecule_encoding,
            attention_mask=extended_molecule_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        molecule_sequence_output = encoder_outputs[0]
        pooled_output = self.pool(molecule_sequence_output) if self.pool is not None else None
        # Upsample molecules back to char length and fuse with char encoding.
        repeated_molecules = self._repeat_molecules(
            molecule_sequence_output, char_seq_length=input_shape[-1]
        )
        concat = torch.cat([input_char_encoding, repeated_molecules], dim=-1)
        sequence_output = self.projection(concat)
        final_chars_encoder_outputs = self.final_char_encoder(
            sequence_output,
            attention_mask=extended_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = final_chars_encoder_outputs.y
        if output_hidden_states:
            # Concatenate hidden states from all three encoder stages.
            deep_encoder_hidden_states = (
                encoder_outputs.hiddens if return_dict else encoder_outputs[1]
            )
            all_hidden_states = (
                all_hidden_states
                + init_chars_encoder_outputs.hiddens
                + deep_encoder_hidden_states
                + final_chars_encoder_outputs.hiddens
            )
        if output_attentions:
            # Likewise for the attention maps of all three stages.
            deep_encoder_self_attentions = (
                encoder_outputs.attns if return_dict else encoder_outputs[-1]
            )
            all_self_attentions = (
                all_self_attentions
                + init_chars_encoder_outputs.attns
                + deep_encoder_self_attentions
                + final_chars_encoder_outputs.attns
            )
        if not return_dict:
            output = (sequence_output, pooled_output)
            output += tuple(v for v in [all_hidden_states, all_self_attentions] if v is not None)
            return output
        return qo.WithPools(
            y=sequence_output,
            pools=pooled_output,
            hiddens=all_hidden_states,
            attns=all_self_attentions,
        )
class Encoder(qc.Module):
    """Stack of `Layer` modules with optional gradient checkpointing.

    All layers share the same chunked-attention configuration; the forward
    pass optionally collects per-layer hidden states and attention maps.
    """

    def __init__(
        self,
        config,
        local=False,
        always_attend_to_first_position=False,
        first_position_attends_to_all=False,
        attend_from_chunk_width=128,
        attend_from_chunk_stride=128,
        attend_to_chunk_width=128,
        attend_to_chunk_stride=128,
    ):
        super().__init__()
        self.config = config
        # One identically-configured Layer per configured depth.
        self.layer = nn.ModuleList(
            [
                Layer(
                    config,
                    local,
                    always_attend_to_first_position,
                    first_position_attends_to_all,
                    attend_from_chunk_width,
                    attend_from_chunk_stride,
                    attend_to_chunk_width,
                    attend_to_chunk_stride,
                )
                for _ in range(config.n_lays)
            ]
        )
        self.gradient_checkpointing = False

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        # Accumulators remain None when the caller did not request them.
        hiddens_trace = () if output_hidden_states else None
        attn_trace = () if output_attentions else None
        for idx, lay in enumerate(self.layer):
            if hiddens_trace is not None:
                hiddens_trace = hiddens_trace + (hiddens,)
            mask_i = None if head_mask is None else head_mask[idx]
            if self.gradient_checkpointing and self.training:
                # Recompute activations during backward to save memory.
                def make_fwd(mod):
                    def fwd(*xs):
                        return mod(*xs, output_attentions)

                    return fwd

                ys = torch.utils.checkpoint.checkpoint(
                    make_fwd(lay),
                    hiddens,
                    attention_mask,
                    mask_i,
                )
            else:
                ys = lay(hiddens, attention_mask, mask_i, output_attentions)
            hiddens = ys[0]
            if attn_trace is not None:
                attn_trace = attn_trace + (ys[1],)
        if hiddens_trace is not None:
            hiddens_trace = hiddens_trace + (hiddens,)
        if not return_dict:
            return tuple(x for x in (hiddens, hiddens_trace, attn_trace) if x is not None)
        return qo.Base(
            y=hiddens,
            hiddens=hiddens_trace,
            attns=attn_trace,
        )
class Layer(qc.Module):
    """One transformer block: (chunked) self-attention followed by feed-forward."""

    def __init__(
        self,
        config,
        local,
        always_attend_to_first_position,
        first_position_attends_to_all,
        attend_from_chunk_width,
        attend_from_chunk_stride,
        attend_to_chunk_width,
        attend_to_chunk_stride,
    ):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        # Chunking (if any) is applied along the sequence dimension.
        self.seq_len_dim = 1
        self.attention = Attention(
            config,
            local,
            always_attend_to_first_position,
            first_position_attends_to_all,
            attend_from_chunk_width,
            attend_from_chunk_stride,
            attend_to_chunk_width,
            attend_to_chunk_stride,
        )
        self.intermediate = Intermediate(config)
        self.output = Output(config)

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
    ):
        attn_ys = self.attention(
            hiddens,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
        )
        # First element is the context; the rest (attention probs) pass through.
        ctx = attn_ys[0]
        extras = attn_ys[1:]
        y = apply_chunking_to_forward(
            self.feed_forward_chunk,
            self.chunk_size_feed_forward,
            self.seq_len_dim,
            ctx,
        )
        return (y,) + extras

    def feed_forward_chunk(self, attention_output):
        # FFN applied to one sequence chunk at a time.
        return self.output(self.intermediate(attention_output), attention_output)
class Attention(qc.Module):
    """Self-attention that is either global (vanilla) or local (chunked).

    In local mode the sequence is split into `from` chunks (queries) and `to`
    chunks (keys/values) and attention is computed chunk-by-chunk, optionally
    always attending to position 0 ([CLS]) and/or letting the first position
    attend to the full sequence.
    """

    def __init__(
        self,
        config,
        local=False,
        always_attend_to_first_position=False,
        first_position_attends_to_all=False,
        attend_from_chunk_width=128,
        attend_from_chunk_stride=128,
        attend_to_chunk_width=128,
        attend_to_chunk_stride=128,
    ):
        super().__init__()
        self.self = SelfAttention(config)
        self.output = SelfOutput(config)
        self.local = local
        # A stride larger than the width would leave positions unattended.
        assert attend_from_chunk_width >= attend_from_chunk_stride
        assert attend_to_chunk_width >= attend_to_chunk_stride
        self.always_attend_to_first_position = always_attend_to_first_position
        self.first_position_attends_to_all = first_position_attends_to_all
        self.attend_from_chunk_width = attend_from_chunk_width
        self.attend_from_chunk_stride = attend_from_chunk_stride
        self.attend_to_chunk_width = attend_to_chunk_width
        self.attend_to_chunk_stride = attend_to_chunk_stride

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
    ):
        if not self.local:
            # Global attention: queries and keys/values over the whole sequence.
            self_outputs = self.self(hiddens, hiddens, attention_mask, head_mask, output_attentions)
            attention_output = self_outputs[0]
        else:
            from_seq_length = to_seq_length = hiddens.shape[1]
            from_tensor = to_tensor = hiddens
            # Build query-side chunks; a (0, 1) chunk for [CLS] when it
            # attends to everything.
            from_chunks = []
            if self.first_position_attends_to_all:
                from_chunks.append((0, 1))
                from_start = 1
            else:
                from_start = 0
            for chunk_start in range(from_start, from_seq_length, self.attend_from_chunk_stride):
                chunk_end = min(from_seq_length, chunk_start + self.attend_from_chunk_width)
                from_chunks.append((chunk_start, chunk_end))
            # Build key/value-side chunks; the first one spans the full
            # sequence when position 0 attends to all.
            to_chunks = []
            if self.first_position_attends_to_all:
                to_chunks.append((0, to_seq_length))
            for chunk_start in range(0, to_seq_length, self.attend_to_chunk_stride):
                chunk_end = min(to_seq_length, chunk_start + self.attend_to_chunk_width)
                to_chunks.append((chunk_start, chunk_end))
            if len(from_chunks) != len(to_chunks):
                # BUGFIX: the second placeholder previously echoed `from_chunks`,
                # making the diagnostic useless; it now reports `to_chunks`.
                raise ValueError(
                    f"Expected to have same number of `from_chunks` ({from_chunks}) and "
                    f"`to_chunks` ({to_chunks}). Check strides."
                )
            attention_output_chunks = []
            attention_probs_chunks = []
            for (from_start, from_end), (to_start, to_end) in zip(from_chunks, to_chunks):
                from_tensor_chunk = from_tensor[:, from_start:from_end, :]
                to_tensor_chunk = to_tensor[:, to_start:to_end, :]
                attention_mask_chunk = attention_mask[:, from_start:from_end, to_start:to_end]
                if self.always_attend_to_first_position:
                    # Prepend the [CLS] key/value and its mask column so every
                    # chunk can attend to position 0.
                    cls_attention_mask = attention_mask[:, from_start:from_end, 0:1]
                    attention_mask_chunk = torch.cat(
                        [cls_attention_mask, attention_mask_chunk], dim=2
                    )
                    cls_position = to_tensor[:, 0:1, :]
                    to_tensor_chunk = torch.cat([cls_position, to_tensor_chunk], dim=1)
                attention_outputs_chunk = self.self(
                    from_tensor_chunk,
                    to_tensor_chunk,
                    attention_mask_chunk,
                    head_mask,
                    output_attentions,
                )
                attention_output_chunks.append(attention_outputs_chunk[0])
                if output_attentions:
                    attention_probs_chunks.append(attention_outputs_chunk[1])
            # Reassemble the full sequence from the per-chunk contexts.
            attention_output = torch.cat(attention_output_chunks, dim=1)
        attention_output = self.output(attention_output, hiddens)
        outputs = (attention_output,)
        if not self.local:
            outputs = outputs + self_outputs[1:]
        else:
            outputs = outputs + tuple(attention_probs_chunks)
        return outputs
class Embed(qc.Module):
    """Character embeddings assembled from several hash-bucket shards,
    plus token-type and (absolute) position embeddings."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        # Each hash function owns an equal slice of d_model.
        shard = config.d_model // config.num_hash_functions
        for i in range(config.num_hash_functions):
            setattr(
                self,
                f"HashBucketCodepointEmbedder_{i}",
                qc.Embed(config.num_hash_buckets, shard),
            )
        self.char_position_embeddings = qc.Embed(config.num_hash_buckets, config.d_model)
        self.token_type_embeddings = qc.Embed(config.n_typ, config.d_model)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)
        self.register_buffer("position_ids", torch.arange(config.n_pos).expand((1, -1)))
        self.pos_type = getattr(config, "pos_type", "absolute")

    def _hash_bucket_tensors(self, input_ids, num_hashes, num_buckets):
        # One independent hash of the (shifted) ids per prime multiplier.
        if num_hashes > len(_PRIMES):
            raise ValueError(f"`num_hashes` must be <= {len(_PRIMES)}")
        return [((input_ids + 1) * p) % num_buckets for p in _PRIMES[:num_hashes]]

    def _embed_hash_buckets(self, input_ids, d_embed, num_hashes, num_buckets):
        # Concatenate one shard embedding per hash function along the feature dim.
        if d_embed % num_hashes != 0:
            raise ValueError(f"Expected `d_embed` ({d_embed}) % `num_hashes` ({num_hashes}) == 0")
        buckets = self._hash_bucket_tensors(
            input_ids, num_hashes=num_hashes, num_buckets=num_buckets
        )
        shards = [
            getattr(self, f"HashBucketCodepointEmbedder_{i}")(ids)
            for i, ids in enumerate(buckets)
        ]
        return torch.cat(shards, dim=-1)

    def forward(
        self,
        input_ids=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
    ):
        shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1]
        n = shape[1]
        if position_ids is None:
            position_ids = self.position_ids[:, :n]
        if token_type_ids is None:
            token_type_ids = torch.zeros(shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self._embed_hash_buckets(
                input_ids,
                self.config.d_model,
                self.config.num_hash_functions,
                self.config.num_hash_buckets,
            )
        y = inputs_embeds + self.token_type_embeddings(token_type_ids)
        if self.pos_type == "absolute":
            y = y + self.char_position_embeddings(position_ids)
        return self.drop(self.norm(y))
class CharactersToMolecules(qc.Module):
    """Downsamples character encodings to "molecule" encodings with a strided conv."""

    def __init__(self, config):
        super().__init__()
        rate = config.downsampling_rate
        self.conv = qc.Conv1d(
            in_channels=config.d_model,
            out_channels=config.d_model,
            kernel_size=rate,
            stride=rate,
        )
        self.act = qu.activation(config.act)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)

    def forward(self, char_encoding):
        # Keep the [CLS] position untouched by the convolution.
        cls_tok = char_encoding[:, 0:1, :]
        # Conv1d wants (batch, channels, seq); transpose in and back out.
        y = self.conv(char_encoding.transpose(1, 2)).transpose(1, 2)
        y = self.act(y)
        # Drop the last molecule and prepend [CLS] so lengths line up.
        y = torch.cat([cls_tok, y[:, 0:-1, :]], dim=1)
        return self.norm(y)
class ConvProjection(qc.Module):
    """Projects concatenated (char, molecule) features back to d_model with a
    length-preserving ("same"-padded) 1-D convolution."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.conv = qc.Conv1d(
            in_channels=config.d_model * 2,
            out_channels=config.d_model,
            kernel_size=config.upsampling_kernel_size,
            stride=1,
        )
        self.act = qu.activation(config.act)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)

    def forward(self, inputs, final_seq_char_positions=None):
        # "Same" padding, split as evenly as possible around the sequence.
        total = self.config.upsampling_kernel_size - 1
        left = total // 2
        padded = nn.ConstantPad1d((left, total - left), 0)(inputs.transpose(1, 2))
        y = self.conv(padded).transpose(1, 2)
        y = self.drop(self.norm(self.act(y)))
        if final_seq_char_positions is not None:
            # Gathering only the masked positions is not implemented.
            raise NotImplementedError("ForMasked is currently not supported")
        return y
class SelfAttention(qc.Module):
    """Multi-head scaled dot-product attention over a (from, to) tensor pair.

    Queries come from `from_tensor` and keys/values from `to_tensor`, so the
    same module serves both global attention (from == to) and chunked local
    attention (from is a query chunk, to is a key/value chunk). Supports
    relative-position scoring via `pos_type` in {"relative_key",
    "relative_key_query"}; "absolute" adds nothing here (positions are
    handled in the embedding layer).
    """

    def __init__(self, config):
        super().__init__()
        # d_model must split evenly across heads unless a separate embedding
        # size (`d_embed`) is configured.
        if config.d_model % config.n_heads != 0 and not hasattr(config, "d_embed"):
            raise ValueError(
                f"The hidden size ({config.d_model}) is not a multiple of the number of attention "
                f"heads ({config.n_heads})"
            )
        self.n_heads = config.n_heads
        self.attention_head_size = int(config.d_model / config.n_heads)
        self.all_head_size = self.n_heads * self.attention_head_size
        self.query = qc.Linear(config.d_model, self.all_head_size)
        self.key = qc.Linear(config.d_model, self.all_head_size)
        self.value = qc.Linear(config.d_model, self.all_head_size)
        self.drop = qc.Dropout(config.drop_attn)
        self.pos_type = getattr(config, "pos_type", "absolute")
        if self.pos_type == "relative_key" or self.pos_type == "relative_key_query":
            self.n_pos = config.n_pos
            # One embedding per signed distance in [-(n_pos-1), n_pos-1].
            self.distance_embedding = qc.Embed(2 * config.n_pos - 1, self.attention_head_size)

    def transpose_for_scores(self, x):
        # (batch, seq, all_head) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.n_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        from_tensor,
        to_tensor,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(from_tensor)
        key_layer = self.transpose_for_scores(self.key(to_tensor))
        value_layer = self.transpose_for_scores(self.value(to_tensor))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        # Raw scores: (batch, heads, from_seq, to_seq).
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.pos_type == "relative_key" or self.pos_type == "relative_key_query":
            # NOTE(review): distances are computed from `from_tensor`'s length
            # only — this assumes from_seq == to_seq here; confirm for the
            # chunked-attention caller.
            seq_length = from_tensor.size()[1]
            position_ids_l = torch.arange(
                seq_length, dtype=torch.long, device=from_tensor.device
            ).view(-1, 1)
            position_ids_r = torch.arange(
                seq_length, dtype=torch.long, device=from_tensor.device
            ).view(1, -1)
            distance = position_ids_l - position_ids_r
            # Shift signed distances into the embedding's [0, 2*n_pos-2] range.
            positional_embedding = self.distance_embedding(distance + self.n_pos - 1)
            positional_embedding = positional_embedding.to(
                dtype=query_layer.dtype
            )  # fp16 compatibility
            if self.pos_type == "relative_key":
                # Query-only relative term: q · rel_pos.
                relative_position_scores = torch.einsum(
                    "bhld,lrd->bhlr", query_layer, positional_embedding
                )
                attention_scores = attention_scores + relative_position_scores
            elif self.pos_type == "relative_key_query":
                # Both query- and key-side relative terms.
                relative_position_scores_query = torch.einsum(
                    "bhld,lrd->bhlr", query_layer, positional_embedding
                )
                relative_position_scores_key = torch.einsum(
                    "bhrd,lrd->bhlr", key_layer, positional_embedding
                )
                attention_scores = (
                    attention_scores + relative_position_scores_query + relative_position_scores_key
                )
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            if attention_mask.ndim == 3:
                # if attention_mask is 3D, add a broadcastable heads dimension:
                attention_mask = torch.unsqueeze(attention_mask, dim=1)
            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            attention_mask = (1.0 - attention_mask.float()) * -10000.0
            # Apply the attention mask (precomputed for all layers in CanineModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = F.softmax(attention_scores, dim=-1)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.drop(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        # (batch, heads, seq, head_size) -> (batch, seq, all_head)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
class SelfOutput(qc.Module):
    """Projects attention context back to d_model with dropout and a
    residual LayerNorm connection."""

    def __init__(self, config):
        super().__init__()
        self.dense = qc.Linear(config.d_model, config.d_model)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)

    def forward(self, hiddens, input_tensor):
        y = self.drop(self.dense(hiddens))
        # Residual add before normalization.
        return self.norm(y + input_tensor)
class Intermediate(qc.Module):
    """Feed-forward expansion: d_model -> d_ff, followed by the configured activation."""

    def __init__(self, cfg):
        super().__init__()
        self.dense = qc.Linear(cfg.d_model, cfg.d_ff)
        self.act = qu.activation(cfg.act)

    def forward(self, x):
        return self.act(self.dense(x))
class Output(qc.Module):
    """Feed-forward contraction: d_ff -> d_model with dropout and a
    residual LayerNorm connection."""

    def __init__(self, config):
        super().__init__()
        self.dense = qc.Linear(config.d_ff, config.d_model)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)

    def forward(self, hiddens, input_tensor):
        y = self.drop(self.dense(hiddens))
        # Residual add before normalization.
        return self.norm(y + input_tensor)
# Known pretrained CANINE checkpoint identifiers on the model hub.
LIST = ["google/canine-s", "google/canine-r"]
# Prime numbers — presumably the per-hash-function primes for CANINE's
# multi-hash character embedding; verify against the embedding code that
# consumes this table.
_PRIMES = [31, 43, 59, 61, 73, 97, 103, 113, 137, 149, 157, 173, 181, 193, 211, 223]
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,530
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/config/reformer.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import torch
from ... import core as qc
class PreTrained(qc.PreTrained):
    """Base class for Reformer models.

    Declares the default hyperparameters for the "reformer" model type via
    `hs`, provides `dummy_inputs` for tracing, and implements the standard
    per-module weight initialization in `_init_weights`.
    """

    hs = qc.Hypers(
        [],
        dict(
            act="relu",
            attention_head_size=64,
            attn_layers=["local", "lsh", "local", "lsh", "local", "lsh"],
            axial_norm_std=1.0,
            axial_pos_embds_dim=[64, 192],
            axial_pos_embds=True,
            axial_pos_shape=[64, 64],
            chunk_size_lm_head=0,
            d_hidden=256,
            drop_proj=None,
            drop=0.05,
            EOS=2,
            eps=1e-12,
            feed_forward_size=512,
            hash_seed=None,
            init_range=0.02,
            is_decoder=False,
            local_attention_probs_dropout_prob=0.05,
            local_attn_chunk_length=64,
            local_num_chunks_after=0,
            local_num_chunks_before=1,
            lsh_attention_probs_dropout_prob=0.0,
            lsh_attn_chunk_length=64,
            lsh_num_chunks_after=0,
            lsh_num_chunks_before=1,
            model_type="reformer",
            n_heads=12,
            n_pos=4096,
            num_buckets=None,
            num_hashes=1,
            PAD=0,
            s_vocab=320,
            tie_word_embeddings=False,
            y_cache=True,
        ),
    )

    @property
    def dummy_inputs(self):
        """Return a minimal {input_ids, attention_mask} dict for tracing.

        NOTE(review): DUMMY_INPUTS and DUMMY_MASK are not defined or imported
        anywhere in this file — confirm where they are expected to come from.
        """
        return {
            "input_ids": torch.tensor(DUMMY_INPUTS),
            "attention_mask": torch.tensor(DUMMY_MASK),
        }

    def _init_weights(self, module):
        """Initialize `module`'s parameters according to its type.

        Axial position embeddings get a normal init with `axial_norm_std`;
        embeddings and linear layers use `init_range`; layer norms are reset
        to identity (weight=1, bias=0).
        """
        # NOTE(review): AxialPositionEmbeddings is referenced but not imported
        # in this file — confirm it is brought into scope by the consumer.
        if isinstance(module, AxialPositionEmbeddings):
            for weight in module.weights:
                # Fix: the original called `nn.init.normal_` but `nn` was never
                # imported (only `torch` is); qualify through torch instead.
                torch.nn.init.normal_(weight, std=self.config.axial_norm_std)
        elif isinstance(module, qc.Embed):
            module.weight.data.normal_(mean=0.0, std=self.config.init_range)
            if module.padding_idx is not None:
                # keep the padding embedding exactly zero
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, qc.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.init_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, qc.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
# Checkpoint name -> hosted config-file URL for the pretrained Reformer
# variants this config supports.
MAP = {
    "google/reformer-crime-and-punishment": "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/config.json",
    "google/reformer-enwik8": "https://huggingface.co/google/reformer-enwik8/resolve/main/config.json",
}
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,531
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/run/trafo_std.py
|
import argparse
import time
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import TransformerEncoder, TransformerEncLayer
from io import open
from ..utils import Corpus
# Command-line interface and global setup.
#
# BUG FIX: the script below reads args.seed, args.cuda, args.model_name,
# args.lr, args.train_epochs, args.train_batch_size and args.eval_batch_size,
# but none of those options were declared, so parse_args() produced a
# namespace missing them and the script crashed with AttributeError.
# They are declared here with the conventional defaults of the upstream
# PyTorch word-language-model example (NOTE(review): confirm defaults
# against the project's intended configuration).
parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint", type=str, default="./model.pt")
parser.add_argument("--outf", type=str, default="generated.txt")
parser.add_argument("--temperature", type=float, default=1.0)
# was default="1000" (string) for an int option; argparse coerced it, but an
# int default is the correct spelling
parser.add_argument("--words", type=int, default=1000)
parser.add_argument("--bptt", type=int, default=35)
parser.add_argument("--clip", type=float, default=0.25)
parser.add_argument("--data", type=str, default="./data/wikitext-2")
parser.add_argument("--drop", type=float, default=0.2)
parser.add_argument("--dry-run", action="store_true")
parser.add_argument("--emsize", type=int, default=200)
parser.add_argument("--log-interval", type=int, default=200, metavar="N")
parser.add_argument("--nhead", type=int, default=2)
parser.add_argument("--nhid", type=int, default=200)
parser.add_argument("--nlayers", type=int, default=2)
parser.add_argument("--save", type=str, default="model.pt")
parser.add_argument("--tied", action="store_true")
# Previously referenced but never declared:
parser.add_argument("--seed", type=int, default=1111)
parser.add_argument("--cuda", action="store_true")
parser.add_argument("--model_name", type=str, default="LSTM")
parser.add_argument("--lr", type=float, default=20.0)
parser.add_argument("--train_epochs", type=int, default=40)
parser.add_argument("--train_batch_size", type=int, default=20)
parser.add_argument("--eval_batch_size", type=int, default=10)
args = parser.parse_args()
torch.manual_seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")
corpus = Corpus(args.data)
class RNN(qc.Module):
    """Recurrent language model: embed -> dropout -> RNN stack -> linear -> log_softmax.

    rnn_type is one of 'LSTM', 'GRU', 'RNN_TANH', 'RNN_RELU'.  With
    tie_weights the decoder shares the embedding matrix, which requires
    nhid == ninp (i.e. --nhid == --emsize).
    """

    def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, drop=0.5, tie_weights=False):
        super().__init__()
        self.ntoken = ntoken
        self.drop = qc.Dropout(drop)
        self.encoder = qc.Embed(ntoken, ninp)
        if rnn_type in ["LSTM", "GRU"]:
            # BUG FIX: torch.nn.LSTM/GRU take the keyword `dropout`, not
            # `drop`; the original call raised TypeError at construction.
            self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=drop)
        else:
            try:
                nonlinearity = {"RNN_TANH": "tanh", "RNN_RELU": "relu"}[rnn_type]
            except KeyError:
                raise ValueError(
                    """An invalid option for `--model` was supplied,
                options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']"""
                )
            # BUG FIX: same `dropout=` keyword correction for nn.RNN.
            self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=drop)
        self.decoder = nn.Linear(nhid, ntoken)
        if tie_weights:
            if nhid != ninp:
                raise ValueError("When using the tied flag, nhid must be equal to emsize")
            self.decoder.weight = self.encoder.weight
        self.init_weights()
        self.rnn_type = rnn_type
        self.nhid = nhid
        self.nlayers = nlayers

    def init_weights(self):
        # Uniform init for embeddings/decoder, zero decoder bias.
        initrange = 0.1
        nn.init.uniform_(self.encoder.weight, -initrange, initrange)
        nn.init.zeros_(self.decoder.bias)
        nn.init.uniform_(self.decoder.weight, -initrange, initrange)

    def forward(self, input, hidden):
        """Return (log-probs of shape (seq*batch, ntoken), new hidden state)."""
        emb = self.drop(self.encoder(input))
        output, hidden = self.rnn(emb, hidden)
        output = self.drop(output)
        decoded = self.decoder(output)
        decoded = decoded.view(-1, self.ntoken)
        return F.log_softmax(decoded, dim=1), hidden

    def init_hidden(self, bsz):
        """Zero hidden state matching model dtype/device; LSTM needs an (h, c) pair."""
        weight = next(self.parameters())
        if self.rnn_type == "LSTM":
            return (
                weight.new_zeros(self.nlayers, bsz, self.nhid),
                weight.new_zeros(self.nlayers, bsz, self.nhid),
            )
        else:
            return weight.new_zeros(self.nlayers, bsz, self.nhid)
class PositionalEncoding(qc.Module):
    """Additive sinusoidal position encoding (Vaswani et al.) with dropout.

    Builds a (max_len, 1, d_hidden) buffer `pe`; forward adds the first
    len(x) positions to the input and applies dropout.
    """

    def __init__(self, d_hidden, drop=0.1, max_len=5000):
        super().__init__()
        self.drop = qc.Dropout(p=drop)
        # Position indices as a column vector, paired with per-dimension
        # frequencies exp(-log(10000) * 2i / d_hidden).
        pos = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        freq = torch.exp(torch.arange(0, d_hidden, 2).float() * (-math.log(10000.0) / d_hidden))
        table = torch.zeros(max_len, d_hidden)
        table[:, 0::2] = torch.sin(pos * freq)  # even dims: sine
        table[:, 1::2] = torch.cos(pos * freq)  # odd dims: cosine
        # Shape to (max_len, 1, d_hidden) so it broadcasts over the batch dim.
        self.register_buffer("pe", table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        return self.drop(x + self.pe[: x.size(0), :])
class Transformer(qc.Module):
    """Encoder-only Transformer language model.

    Embedding (scaled by sqrt(ninp)) -> sinusoidal positions -> stacked
    encoder layers -> linear decoder -> log_softmax over the vocabulary.
    """

    def __init__(self, ntoken, ninp, nhead, nhid, nlayers, drop=0.5):
        super().__init__()
        self.model_type = "Transformer"
        self.src_mask = None
        self.pos_encoder = PositionalEncoding(ninp, drop)
        # BUG FIX: torch.nn exposes TransformerEncoderLayer, not
        # `TransformerEncLayer`; the original name (and the top-of-file
        # import of it) does not exist and fails at import time.
        enc_layer = nn.TransformerEncoderLayer(ninp, nhead, nhid, drop)
        self.transformer_encoder = nn.TransformerEncoder(enc_layer, nlayers)
        self.encoder = qc.Embed(ntoken, ninp)
        self.ninp = ninp
        self.decoder = nn.Linear(ninp, ntoken)
        self.init_weights()

    def _generate_square_subsequent_mask(self, sz):
        # Causal mask: position i may attend to positions <= i; disallowed
        # positions are -inf (added to the attention logits).
        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float("-inf")).masked_fill(mask == 1, float(0.0))
        return mask

    def init_weights(self):
        initrange = 0.1
        nn.init.uniform_(self.encoder.weight, -initrange, initrange)
        nn.init.zeros_(self.decoder.bias)
        nn.init.uniform_(self.decoder.weight, -initrange, initrange)

    def forward(self, src, has_mask=True):
        """Return log-probs of shape (seq, batch, ntoken)."""
        if has_mask:
            device = src.device
            # Cache the causal mask; rebuild only when seq length changes.
            if self.src_mask is None or self.src_mask.size(0) != len(src):
                mask = self._generate_square_subsequent_mask(len(src)).to(device)
                self.src_mask = mask
        else:
            self.src_mask = None
        src = self.encoder(src) * math.sqrt(self.ninp)
        src = self.pos_encoder(src)
        output = self.transformer_encoder(src, self.src_mask)
        output = self.decoder(output)
        return F.log_softmax(output, dim=-1)
def batchify(data, bsz, dev=None):
    """Trim *data* to a multiple of *bsz* tokens and reshape to (nbatch, bsz).

    Each column is one independent stream of contiguous text.  *dev* selects
    the target device; it defaults to the module-level ``device`` so existing
    callers are unchanged (generalized from the original hard dependence on
    the global).
    """
    nbatch = data.size(0) // bsz
    # Drop the remainder that doesn't fill a whole batch.
    data = data.narrow(0, 0, nbatch * bsz)
    data = data.view(bsz, -1).t().contiguous()
    return data.to(dev if dev is not None else device)
# Batchify the three corpus splits onto the selected device.
# NOTE(review): args.train_batch_size / args.eval_batch_size / args.model_name
# must be supplied by the CLI parser — verify they are declared.
train_data = batchify(corpus.train, args.train_batch_size)
eval_data = batchify(corpus.eval, args.eval_batch_size)
test_data = batchify(corpus.test, args.eval_batch_size)
ntokens = len(corpus.dictionary)  # vocabulary size
# Build either the Transformer LM or the recurrent LM from CLI options.
if args.model_name == "Transformer":
    model = Transformer(ntokens, args.emsize, args.nhead, args.nhid, args.nlayers, args.drop).to(
        device
    )
else:
    model = RNN(
        args.model_name, ntokens, args.emsize, args.nhid, args.nlayers, args.drop, args.tied
    ).to(device)
# Models emit log-probabilities (log_softmax), hence NLLLoss rather than
# CrossEntropyLoss.
criterion = nn.NLLLoss()
def repackage_hidden(h):
    """Detach hidden state(s) from their autograd history (truncated BPTT).

    Tensors are detached directly; nested containers (e.g. an LSTM's
    (h, c) pair) are rebuilt recursively as tuples.
    """
    if isinstance(h, torch.Tensor):
        return h.detach()
    return tuple(repackage_hidden(part) for part in h)
def get_batch(source, i, bptt=None):
    """Slice a (data, target) pair starting at offset *i*.

    *data* is up to *bptt* rows of *source*; *target* is the same span
    shifted one step forward and flattened (next-token prediction).  The
    slice is clipped near the end of *source*.  *bptt* defaults to
    ``args.bptt`` so existing callers are unchanged (generalized from the
    original hard dependence on the global).
    """
    if bptt is None:
        bptt = args.bptt
    seq_len = min(bptt, len(source) - 1 - i)
    data = source[i : i + seq_len]
    target = source[i + 1 : i + 1 + seq_len].view(-1)
    return data, target
def evaluate(data_source):
    """Return the mean per-token NLL of the global `model` over *data_source*.

    Runs in eval mode with gradients disabled; for recurrent models the
    hidden state is carried across windows and detached each step.
    """
    model.eval()
    loss_sum = 0.0
    n_vocab = len(corpus.dictionary)
    is_trafo = args.model_name == "Transformer"
    if not is_trafo:
        hidden = model.init_hidden(args.eval_batch_size)
    with torch.no_grad():
        for pos in range(0, data_source.size(0) - 1, args.bptt):
            data, targets = get_batch(data_source, pos)
            if is_trafo:
                out = model(data).view(-1, n_vocab)
            else:
                out, hidden = model(data, hidden)
                hidden = repackage_hidden(hidden)
            # Weight each window's loss by its length before averaging.
            loss_sum += len(data) * criterion(out, targets).item()
    return loss_sum / (len(data_source) - 1)
def train():
    """One full pass over `train_data` with manual SGD and truncated BPTT.

    Uses module-level `model`, `criterion`, `train_data`, `args`, `lr`
    and `epoch`.  Parameters are updated in-place (p -= lr * grad) after
    gradient clipping; no torch optimizer is involved.
    """
    model.train()
    total_loss = 0.0
    start_time = time.time()
    ntokens = len(corpus.dictionary)
    if args.model_name != "Transformer":
        hidden = model.init_hidden(args.train_batch_size)
    for batch, i in enumerate(range(0, train_data.size(0) - 1, args.bptt)):
        data, targets = get_batch(train_data, i)
        model.zero_grad()
        if args.model_name == "Transformer":
            output = model(data)
            output = output.view(-1, ntokens)
        else:
            # Detach the hidden state from the previous window's graph so
            # backprop stays truncated to this window.
            hidden = repackage_hidden(hidden)
            output, hidden = model(data, hidden)
        loss = criterion(output, targets)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        # Manual SGD step; `lr` is the module-level learning rate that is
        # annealed between epochs.
        for p in model.parameters():
            p.data.add_(p.grad, alpha=-lr)
        total_loss += loss.item()
        if batch % args.log_interval == 0 and batch > 0:
            cur_loss = total_loss / args.log_interval
            elapsed = time.time() - start_time
            print(
                "| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | "
                "loss {:5.2f} | ppl {:8.2f}".format(
                    epoch,
                    batch,
                    len(train_data) // args.bptt,
                    lr,
                    elapsed * 1000 / args.log_interval,
                    cur_loss,
                    math.exp(cur_loss),
                )
            )
            # Reset the running loss/timer for the next logging window.
            total_loss = 0
            start_time = time.time()
        if args.dry_run:
            # Smoke-test mode: stop after a single batch.
            break
# Epoch loop: train, validate, checkpoint on improvement, and quarter the
# learning rate whenever validation loss stalls.  Ctrl-C exits early but
# still falls through to test evaluation on the best checkpoint.
lr = args.lr
best_val_loss = None
try:
    for epoch in range(1, args.train_epochs + 1):
        epoch_start_time = time.time()
        train()
        val_loss = evaluate(eval_data)
        print("-" * 89)
        print(
            "| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | "
            "valid ppl {:8.2f}".format(
                epoch, (time.time() - epoch_start_time), val_loss, math.exp(val_loss)
            )
        )
        print("-" * 89)
        if not best_val_loss or val_loss < best_val_loss:
            # New best: persist the whole model object (not just state_dict).
            with open(args.save, "wb") as f:
                torch.save(model, f)
            best_val_loss = val_loss
        else:
            # Anneal the learning rate when validation stops improving.
            lr /= 4.0
except KeyboardInterrupt:
    print("-" * 89)
    print("Exiting from training early")
# Reload the best checkpoint for final evaluation.
with open(args.save, "rb") as f:
    model = torch.load(f)
if args.model_name in ["RNN_TANH", "RNN_RELU", "LSTM", "GRU"]:
    # After torch.load, RNN weights may be non-contiguous; re-flatten so
    # cuDNN can use its fused kernels.
    model.rnn.flatten_parameters()
test_loss = evaluate(test_data)
print("=" * 89)
print(
    "| End of training | test loss {:5.2f} | test ppl {:8.2f}".format(
        test_loss, math.exp(test_loss)
    )
)
print("=" * 89)
# Text generation: reload the checkpoint and sample args.words tokens.
# Sampling divides logits by temperature before exp(); values below 1e-3
# would blow up the exponentials, hence the guard.
if args.temperature < 1e-3:
    parser.error("--temperature has to be greater or equal 1e-3.")
with open(args.checkpoint, "rb") as f:
    model = torch.load(f, map_location=device)
model.eval()
corpus = Corpus(args.data)
ntokens = len(corpus.dictionary)
# Transformer models take the full running sequence; RNNs carry a hidden
# state and consume one token at a time.
is_transformer_model = hasattr(model, "model_type") and model.model_type == "Transformer"
if not is_transformer_model:
    hidden = model.init_hidden(1)
# Seed with a single random token of shape (seq=1, batch=1).
input = torch.randint(ntokens, (1, 1), dtype=torch.long).to(device)
with open(args.outf, "w") as outf:
    with torch.no_grad():
        for i in range(args.words):
            if is_transformer_model:
                # No causal mask needed at generation time (has_mask=False);
                # sample from the distribution at the last position.
                output = model(input, False)
                word_weights = output[-1].squeeze().div(args.temperature).exp().cpu()
                word_idx = torch.multinomial(word_weights, 1)[0]
                word_tensor = torch.Tensor([[word_idx]]).long().to(device)
                # Append the sampled token and feed the whole sequence back.
                input = torch.cat([input, word_tensor], 0)
            else:
                output, hidden = model(input, hidden)
                word_weights = output.squeeze().div(args.temperature).exp().cpu()
                word_idx = torch.multinomial(word_weights, 1)[0]
                # Reuse the (1, 1) input tensor for the next step.
                input.fill_(word_idx)
            word = corpus.dictionary.idx2word[word_idx]
            # 20 words per output line.
            outf.write(word + ("\n" if i % 20 == 19 else " "))
            if i % args.log_interval == 0:
                print("| Generated {}/{} words".format(i, args.words))
# Example invocations; NOTE(review): flag names (--model, main.py/generate.py)
# do not match this script's parser — confirm against the intended CLI.
"""
python main.py --cuda --train_epochs 6
python main.py --cuda --train_epochs 6 --tied
python main.py --cuda --tied
python main.py --cuda --train_epochs 6 --model Transformer --lr 5
python generate.py
python generate.py --cuda --model Transformer
"""
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,532
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/examples/empty.py
|
# Minimal Triton example: defines and launches an empty JIT kernel.
# Useful as a smoke test that the Triton compile/launch path works at all.
import torch
import triton
import triton.language as tl


@triton.jit
def kernel(X, stride_xm, stride_xn, BLOCK: tl.constexpr):
    # Intentionally empty: the kernel takes a pointer, two strides and a
    # compile-time block size, but performs no work.
    pass


# NOTE(review): requires a CUDA device — presumably only run as a manual example.
X = torch.randn(1, device="cuda")
# Launch on a 1-element grid; `pgm` holds the compiled program handle.
pgm = kernel[(1,)](X, 1, 1, BLOCK=1024)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,533
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/base/doc/blog.py
|
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import shutil as sh
import filecmp as fl
import pathlib as pth
from .log import Logger
from .base import config
# from .mirror import copy
from .counter import counters
# Module-level logger for this file's diagnostics.
log = Logger(__name__)
# File suffix for generated blog pages (reStructuredText).
SUFF = '.rst'
def copy(src, dst):
    """Materialize *src* at path *dst*, creating parent directories.

    *src* may be a ``pathlib.Path`` (file copied with metadata via
    ``shutil.copy2``) or a ``str`` (written as text).  When *dst* already
    exists it is left untouched, but its content must match *src* exactly:
    a deep file comparison for Paths, string equality for text.  A
    mismatch trips an assertion.
    """
    dst.parent.mkdir(parents=True, exist_ok=True)
    if not isinstance(src, pth.Path):
        assert isinstance(src, str)
        if not dst.exists():
            dst.write_text(src)
            return
        # Existing destination must already hold the same text.
        assert src == dst.read_text()
        return
    if not dst.exists():
        sh.copy2(str(src), str(dst))
        return
    # shallow=False forces a byte-for-byte content comparison.
    assert fl.cmp(str(src), str(dst), False)
class Blog:
    """Exports a context's chained records as ``.rst`` blog pages.

    Pages are written under ``base / dst``; when a message has an
    original source document, that file is copied alongside the page.
    """

    # Counter spec passed to `counters`: (label, symbol) pairs plus the
    # progress banner shown while populating.
    populate_args = ((('chained', '.'), ('blogged', '+'), ('excluded', '-'),
                      ('failed', 'F')), 'Populating:')

    def __init__(self, base):
        # Root directory under which all blog output is written.
        self.base = base

    def populate(self, dst, ctxt, **kw):
        """Write one ``.rst`` page per message chained from *ctxt* into
        ``base / dst`` and return the counters summarizing the run.
        """
        kw.update(ctxt=ctxt)
        dst = self.base / dst
        # BUG FIX: was `self.export_args`, an attribute this class never
        # defines (AttributeError at runtime); the counter spec declared
        # above is `populate_args`.
        with counters(self.populate_args, kw) as cs:
            for _, ms in ctxt.recs.chainer(**kw):
                for m in ms:
                    a = '\n'.join(m.blogger(**kw))
                    (dst / m.slug).with_suffix(SUFF).write_text(a)
                    o = m.hdr.original
                    if o:
                        # NOTE(review): assumes config.docs_src is relative
                        # to base — confirm against config definition.
                        s = self.base / config.docs_src / m.source / o
                        copy(s, dst / s.name)
                    # Count each blogged message.
                    cs.incr('+')
        return cs
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,534
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/dataset/big_patent.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import glob
import gzip
import json
import os
import datasets as ds
# Google Drive archive holding the bigPatentData train/valid/test tarballs.
_URL = "https://drive.google.com/uc?export=download&id=1J3mucMFTWrgAYa3LuBZoLRR3CzzYD3fa"
# JSON field names: full patent description (input document) and its abstract
# (summarization target).
_DOC = "description"
_SUM = "abstract"
# CPC (Cooperative Patent Classification) section letter -> human-readable name.
# Only the keys are used programmatically (as subdirectory names / config names).
# NOTE(review): the official CPC label for section F is "Lighting", not
# "Lightning" — looks like a typo in the label text; harmless at runtime.
_CPC = {
    "a": "Human Necessities",
    "b": "Performing Operations; Transporting",
    "c": "Chemistry; Metallurgy",
    "d": "Textiles; Paper",
    "e": "Fixed Constructions",
    "f": "Mechanical Engineering; Lightning; Heating; Weapons; Blasting",
    "g": "Physics",
    "h": "Electricity",
    "y": "General tagging of new or cross-sectional technology",
}
class Config(ds.BuilderConfig):
    """BuilderConfig carrying the list of CPC section letters to include."""

    def __init__(self, *xs, cpc=None, **kw):
        # Pin the dataset version alongside any caller-supplied kwargs.
        kw["version"] = ds.Version("1.0.0")
        super().__init__(*xs, **kw)
        # CPC section letters this config covers (e.g. ["a"] or all of them).
        self.cpc = cpc
class BigPatent(ds.GeneratorBasedBuilder):
    """Builder for the BigPatent summarization dataset.

    One config per CPC section letter, plus an "all" config covering every
    section.  Examples pair a patent description with its abstract.
    """

    BUILDER_CONFIGS = [Config(cpc=list(_CPC), name="all")] + [
        Config(cpc=[k], name=k) for k, v in sorted(_CPC.items())
    ]

    def _info(self):
        """Return the dataset schema: two string features, doc -> summary."""
        return ds.DatasetInfo(
            description="",
            citation="",
            homepage="",
            license="",
            features=ds.Features({_DOC: ds.Value("string"), _SUM: ds.Value("string")}),
            supervised_keys=(_DOC, _SUM),
        )

    def _split_generators(self, mgr):
        """Download the archive and yield one generator per split."""
        p = mgr.download_and_extract(_URL)
        ks = ["train", "valid", "test"]
        fs = mgr.extract({k: os.path.join(p, "bigPatentData", k + ".tar.gz") for k in ks})
        fs = {k: os.path.join(fs[k], k) for k in ks}
        # BUG FIX: the validation entry looked up fs["val"], but the dict key
        # built above is "valid" — a guaranteed KeyError.
        # NOTE(review): upstream `datasets` names this keyword gen_kwargs;
        # confirm the local ds.SplitGenerator really accepts gen_kw.
        return [
            ds.SplitGenerator(name=ds.Split.TRAIN, gen_kw={"path": fs["train"]}),
            ds.SplitGenerator(name=ds.Split.VALIDATION, gen_kw={"path": fs["valid"]}),
            ds.SplitGenerator(name=ds.Split.TEST, gen_kw={"path": fs["test"]}),
        ]

    def _generate_examples(self, path=None):
        """Yield (publication_number, {doc, summary}) from gzipped JSONL shards."""
        for c in self.config.cpc:
            # Each CPC section letter is a subdirectory of gzip'd record files.
            ns = glob.glob(os.path.join(path, c, "*"))
            for n in sorted(ns):
                with open(n, "rb") as f:
                    f = gzip.GzipFile(fileobj=f)
                    for r in f:
                        x = json.loads(r)
                        yield x["publication_number"], {_DOC: x[_DOC], _SUM: x[_SUM]}
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,535
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/examples/copy_strided.py
|
import triton
import triton.language as tl
# triton kernel
@triton.jit
def kernel(X, stride_xm,
           Z, stride_zn,
           BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
    # Copy one BLOCK_M x BLOCK_N tile from X into Z with different strides:
    # X is read with row stride `stride_xm` and unit column stride, while Z
    # is written with unit row stride and column stride `stride_zn`.
    off_m = tl.arange(0, BLOCK_M)
    off_n = tl.arange(0, BLOCK_N)
    # Broadcast the 1-D offsets into a 2-D grid of pointers for each array.
    Xs = X + off_m[:, None] * stride_xm + off_n[None, :] * 1
    Zs = Z + off_m[:, None] * 1 + off_n[None, :] * stride_zn
    tl.store(Zs, tl.load(Xs))
# Ahead-of-time compile for fp32 pointers / i32 strides with a fixed 64x64
# tile, then dump the Triton-GPU IR produced by the compiler.
ret = triton.compile(kernel, signature="*fp32,i32,*fp32,i32", constants={"BLOCK_M": 64, "BLOCK_N": 64})
print(ret.asm["ttgir"])
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,536
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/base/doc/util/doc.py
|
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from .log import Logger
from .header import Header
log = Logger(__name__)
class PackHeader(Header):
    """Header that bootstraps its fields from another header-like object.

    Starts from an empty mapping and copies over whatever attributes the
    given ``hdr`` carries (via ``extract(vars(hdr))``).
    """

    def __init__(self, hdr, **kw):
        # Seed the base Header with an empty mapping, then pull every
        # attribute of the source header into this one.
        super().__init__({}, **kw)
        self.extract(vars(hdr))

    def merge(self, other):
        """Merge ``other`` into self; backfill a missing subject from it."""
        super().merge(other)
        # Only adopt the other subject when we have none of our own.
        if other.subject and not self.subject:
            self.subject = other.subject
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,537
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/tokens/fast/funnel.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from .bert import BertFast
from ..funnel import Tokenizer as Funnel
VOCAB_FS = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
VOCAB_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt",
"funnel-transformer/intermediate": "https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt",
"funnel-transformer/intermediate-base": "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt",
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt",
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json",
"funnel-transformer/intermediate": "https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json",
"funnel-transformer/intermediate-base": "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json",
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json",
},
}
INPUT_CAPS = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {
f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names
}
class Tokenizer(BertFast):
    """Fast (BERT-style) tokenizer for Funnel Transformer checkpoints.

    Identical to the BERT fast tokenizer except that the classification
    token gets its own token-type id (``cls_token_type_id = 2``) instead
    of sharing segment 0.
    """

    vocab_fs = VOCAB_FS
    vocab_map = VOCAB_MAP
    input_caps = INPUT_CAPS
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = Funnel
    # Funnel reserves a dedicated segment id for [CLS].
    cls_token_type_id = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk="<unk>",
        sep="<sep>",
        pad="<pad>",
        cls="<cls>",
        msk="<mask>",
        bos="<s>",
        eos="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kw,
    ):
        # Pure pass-through: all configuration is handled by BertFast.
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk=unk,
            sep=sep,
            pad=pad,
            cls=cls,
            msk=msk,
            bos=bos,
            eos=eos,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kw,
        )

    def create_token_type_ids_from_sequences(self, toks_0, toks_1=None):
        """Build token-type ids for ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``.

        The CLS position carries ``cls_token_type_id``; the first sequence
        (plus its trailing separator) is segment 0, the optional second
        sequence (plus its separator) is segment 1.
        """
        cls_seg = [self.cls_token_id]
        sep_seg = [self.sep_token_id]
        out = [self.cls_token_type_id] * len(cls_seg)
        out += [0] * len(toks_0 + sep_seg)
        if toks_1 is not None:
            out += [1] * len(toks_1 + sep_seg)
        return out
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,538
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/try/softmax.py
|
# %%
import torch
import triton
import triton.language as tl
@torch.jit.script
def naive_softmax(x):
    """Row-wise softmax computed with eager torch ops.

    Numerically stabilized by subtracting the per-row max before
    exponentiating. Total memory traffic: reads 5MN + 2M elements and
    writes 3MN + 2M elements for an (M, N) input.
    """
    # per-row max for numerical stability: read MN, write M
    row_max = x.max(dim=1)[0]
    # shift rows: read MN + M, write MN
    shifted = x - row_max[:, None]
    # exponentiate: read MN, write MN
    num = torch.exp(shifted)
    # per-row normalizer: read MN, write M
    den = num.sum(dim=1)
    # normalize: read MN + M, write MN
    return num / den[:, None]
# %%
@triton.jit
def softmax_kernel(y_ptr, x_ptr, x_stride, y_stride, n_cols, BLOCK: tl.constexpr):
    """Fused softmax: each program instance handles one full row.

    BLOCK must be a power of two >= n_cols; lanes past the row end are
    loaded as -inf so they contribute exp(-inf) == 0 to the reduction.
    """
    row = tl.program_id(0)
    cols = tl.arange(0, BLOCK)
    in_row = cols < n_cols
    vals = tl.load(x_ptr + row * x_stride + cols, mask=in_row, other=-float("inf"))
    num = tl.exp(vals - tl.max(vals, axis=0))
    out = num / tl.sum(num, axis=0)
    tl.store(y_ptr + row * y_stride + cols, out, mask=in_row)
# %%
def softmax(x):
    """Row-wise softmax of a 2-D CUDA tensor via the Triton kernel.

    Launches one program instance per row; the block size is the next
    power of two >= the row length, with the warp count scaled up for
    wider rows.
    """
    rows, cols = x.shape
    block = triton.next_power_of_2(cols)
    # wider rows benefit from more warps per program instance
    warps = 16 if block >= 4096 else (8 if block >= 2048 else 4)
    out = torch.empty_like(x)
    grid = (rows,)
    softmax_kernel[grid](
        out,
        x,
        x.stride(0),
        out.stride(0),
        cols,
        num_warps=warps,
        BLOCK=block,
    )
    return out
# %%
# Sanity check: compare the Triton kernel against torch.softmax on a
# deliberately non-power-of-two row length (781) so the padding mask in
# softmax_kernel is actually exercised. Requires a CUDA device.
torch.manual_seed(0)
x = torch.randn(1823, 781, device="cuda")
y_torch = torch.softmax(x, axis=1)
y_triton = softmax(x)
assert torch.allclose(y_triton, y_torch), (y_triton, y_torch)
# %%
@triton.testing.perf_report(
    triton.testing.Benchmark(
        x_names=["N"],
        x_vals=[128 * i for i in range(2, 100)],
        line_arg="provider",
        line_vals=["triton", "torch-native", "torch-jit"],
        line_names=["Triton", "Torch (native)", "Torch (jit)"],
        styles=[("blue", "-"), ("green", "-"), ("green", "--")],
        ylabel="GB/s",
        plot_name="softmax-performance",
        args={"M": 4096},
    )
)
def benchmark(M, N, provider):
    """Benchmark one softmax implementation on an (M, N) float32 input.

    Returns throughput in GB/s as (median, max, min); min/max are swapped
    relative to the timings because a smaller time is a larger throughput.
    Raises ValueError on an unknown provider — previously an unmatched
    provider fell through all three `if`s and hit a NameError on the
    unbound `ms`.
    """
    x = torch.randn(M, N, device="cuda", dtype=torch.float32)
    quantiles = [0.5, 0.2, 0.8]
    if provider == "torch-native":
        fn = lambda: torch.softmax(x, axis=-1)
    elif provider == "triton":
        fn = lambda: softmax(x)
    elif provider == "torch-jit":
        fn = lambda: naive_softmax(x)
    else:
        raise ValueError(f"unknown provider: {provider!r}")
    ms, min_ms, max_ms = triton.testing.do_bench(fn, quantiles=quantiles)
    # softmax reads and writes each element once => 2 * total bytes moved
    gbps = lambda ms: 2 * x.nelement() * x.element_size() * 1e-9 / (ms * 1e-3)
    return gbps(ms), gbps(max_ms), gbps(min_ms)
benchmark.run(show_plots=True, print_data=True)
# %%
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,539
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/run/xnli.py
|
# Copyright 2021 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# fine-tune multi-lingual models on XNLI (e.g. Bert, DistilBERT, XLM)
import logging
import random
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from transformers import (
CONFIG_MAPPING,
AutoModelForSequenceClassification,
DataCollatorWithPadding,
default_data_collator,
)
from .params import TRAIN, EVAL, TEST, ALL, EACH
from .runner import Runner as Base
log = logging.getLogger(__name__)
class Runner(Base):
    """Fine-tunes multilingual sequence-classification models on XNLI.

    Every component (dataset, config, tokenizer, model, datasets, loaders,
    metric) is built lazily on first access and cached on the instance.
    """

    AutoModel = AutoModelForSequenceClassification

    @property
    def dataset(self):
        """Load the requested XNLI splits keyed by TRAIN/EVAL/TEST.

        Also records ``self.label_list`` from the "label" feature of each
        loaded split (later accesses overwrite earlier ones; the names are
        identical across splits).
        """
        if self._dataset is None:
            ps = self.params
            y = {TRAIN: {}, EVAL: {}, TEST: {}}
            if ps.do_train:
                # train in ps.train_language when given, else in ps.language
                lang = ps.language if ps.train_language is None else ps.train_language
                y[TRAIN] = load_dataset("xnli", lang, split=TRAIN, cache_dir=ps.cache_dir)
                # fix: label names live on the loaded split, not on the dict y
                # (y.features raised AttributeError on the plain dict)
                self.label_list = y[TRAIN].features["label"].names
            if ps.do_eval:
                y[EVAL] = load_dataset("xnli", ps.language, split=EVAL, cache_dir=ps.cache_dir)
                self.label_list = y[EVAL].features["label"].names
            if ps.do_test:
                y[TEST] = load_dataset("xnli", ps.language, split=TEST, cache_dir=ps.cache_dir)
                self.label_list = y[TEST].features["label"].names
            self._dataset = y
        return self._dataset

    @property
    def cols(self):
        """Column names of the train split: ALL columns plus the EACH text column."""
        if self._cols is None:
            cs = self.dataset[TRAIN].column_names
            # prefer an explicit "text" column, otherwise fall back to the first
            t = "text" if "text" in cs else cs[0]
            self._cols = {ALL: cs, EACH: [t]}
        return self._cols

    @property
    def config(self):
        """Model config from config_name/model_name, or a fresh default config.

        Requires ``self.label_list`` (set by the ``dataset`` property) when a
        pretrained config is used.
        """
        if self._config is None:
            ps = self.params
            x = ps.config_name if ps.config_name else ps.model_name
            if x:
                y = self.AutoConfig.from_pretrained(
                    x,
                    n_labels=len(self.label_list),
                    finetune="xnli",
                    cache_dir=ps.cache_dir,
                    revision=ps.model_version,
                    use_auth_token=True if ps.use_auth_token else None,
                )
            else:
                y = CONFIG_MAPPING[ps.model_type]()
                log.warning("Creating new config")
            self._config = y
        return self._config

    @property
    def tokenizer(self):
        """Pretrained tokenizer; building one from scratch is unsupported."""
        if self._tokenizer is None:
            ps = self.params
            x = ps.tokenizer_name if ps.tokenizer_name else ps.model_name
            if not x:
                raise ValueError("Tokenizer from scratch is not supported")
            y = self.AutoTokenizer.from_pretrained(
                x,
                lower_case=ps.lower_case,
                cache_dir=ps.cache_dir,
                use_fast=ps.use_fast_tokenizer,
                revision=ps.model_version,
                use_auth_token=True if ps.use_auth_token else None,
            )
            self._tokenizer = y
        return self._tokenizer

    @property
    def model(self):
        """Sequence-classification model, pretrained when model_name is set."""
        if self._model is None:
            ps = self.params
            if ps.model_name:
                y = self.AutoModel.from_pretrained(
                    ps.model_name,
                    # a ".ckpt" path indicates a TensorFlow checkpoint
                    from_tf=bool(".ckpt" in ps.model_name),
                    config=self.config,
                    cache_dir=ps.cache_dir,
                    revision=ps.model_version,
                    use_auth_token=True if ps.use_auth_token else None,
                )
            else:
                log.info("Training new model")
                y = self.AutoModel.from_config(self.config)
            self._model = y
        return self._model

    @property
    def train_ds(self):
        """Tokenized train split, optionally truncated to max_train_samples."""
        if self._train_ds is None:
            ps, mgr, ds = self.params, self.mgr, self.dataset
            y = ds[TRAIN]
            if ps.max_train_samples is not None:
                y = y.select(range(ps.max_train_samples))
            with mgr.main_process_first():
                y = y.map(
                    self.prep_for_train,
                    batched=True,
                    load_from_cache_file=not ps.overwrite_cache,
                    desc="Running tokenizer on train dataset",
                )
            # log a few random samples for a quick visual sanity check
            for i in random.sample(range(len(y)), 3):
                log.info(f"Sample {i} of the training set: {y[i]}")
            self._train_ds = y
        return self._train_ds

    def prep_for_train(self, xs):
        """Tokenize premise/hypothesis pairs for NLI classification."""
        return self.tokenizer(
            xs["premise"],
            xs["hypothesis"],
            padding=self.padding,
            max_len=self.params.max_seq_length,
            truncation=True,
        )

    @property
    def eval_ds(self):
        """Tokenized eval split (base split provided by Base.eval_ds)."""
        if self._eval_ds is None:
            ps, mgr = self.params, self.mgr
            y = super().eval_ds
            with mgr.main_process_first():
                y = y.map(
                    self.prep_for_train,
                    batched=True,
                    load_from_cache_file=not ps.overwrite_cache,
                    desc="Running tokenizer on eval dataset",
                )
            self._eval_ds = y
        return self._eval_ds

    @property
    def test_ds(self):
        """Tokenized test split (base split provided by Base.test_ds)."""
        if self._test_ds is None:
            ps, mgr = self.params, self.mgr
            y = super().test_ds
            with mgr.main_process_first():
                # NOTE(review): prep_for_eval is not defined on this class —
                # presumably inherited from Base; confirm, or this raises at
                # runtime (eval_ds uses prep_for_train instead).
                y = y.map(
                    self.prep_for_eval,
                    batched=True,
                    num_proc=ps.num_workers,
                    remove_columns=self.cols[ALL],
                    load_from_cache_file=not ps.overwrite_cache,
                    desc="Running tokenizer on test dataset",
                )
            self._test_ds = y
        return self._test_ds

    @property
    def loaders(self):
        """DataLoaders for TRAIN/EVAL (and TEST when do_test is set)."""
        if self._loaders is None:
            ps = self.params
            if ps.pad_to_max_length:
                # everything is already padded to max length: plain collation
                c = default_data_collator
            elif ps.fp16:
                # pad to multiples of 8 for tensor-core efficiency
                c = DataCollatorWithPadding(self.tokenizer, pad_to_multiple_of=8)
            else:
                c = None
            t = DataLoader(
                self.train_ds, shuffle=True, collate_fn=c, batch_size=ps.train_batch_size
            )
            e = DataLoader(self.eval_ds, collate_fn=c, batch_size=ps.eval_batch_size)
            self._loaders = {TRAIN: t, EVAL: e}
            if ps.do_test:
                p = DataLoader(self.test_ds, collate_fn=c, batch_size=ps.eval_batch_size)
                self._loaders[TEST] = p
        return self._loaders

    @property
    def metric(self):
        """The XNLI accuracy metric."""
        if self._metric is None:
            self._metric = load_metric("xnli")
            # def compute_metrics(p: EvalPrediction):
            #    preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
            #    preds = np.argmax(preds, axis=1)
            #    return metric.compute(predictions=preds, references=p.label_ids)
        return self._metric
def main():
    """Run the full XNLI fine-tuning pipeline end to end."""
    runner = Runner()
    # Touch the lazy properties in dependency order so each is built exactly once.
    runner.dataset
    runner.config
    runner.tokenizer
    runner.model
    runner.loaders
    runner.prepare()
    runner.train()
    runner.save()
# Script entry point: run the full pipeline only when invoked directly.
if __name__ == "__main__":
    main()
# Example invocation, kept as a module-level string for reference:
"""
python xnli.py \
--model_name bert-base-multilingual-cased \
--language de \
--train_language en \
--do_train \
--do_eval \
--train_batch_size 32 \
--train_epochs 2.0 \
--max_seq_length 128 \
--out_dir /tmp/debug_xnli/ \
--save_steps -1
"""
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,540
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/models/longformer.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import math
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import functional as F
from transformers.utils import logging
from .. import core as qc
from ..core import utils as qu
from ..core import forward as qf
from ..core import output as qo
from ..core import attention as qa
from ..core.embed import Embed
from ..core.mlp import Classifier, MLP, Predictor, Pool
from ..prep.config.bert import PreTrained
log = logging.get_logger(__name__)
from torch.nn import CrossEntropyLoss
from ...pytorch_utils import (
apply_chunking_to_forward,
)
# Published Longformer checkpoint identifiers this implementation can load.
LIST = [
    "allenai/longformer-base-4096",
    "allenai/longformer-large-4096",
    "allenai/longformer-large-4096-finetuned-triviaqa",
    "allenai/longformer-base-4096-extra.pos.embd.only",
    "allenai/longformer-large-4096-extra.pos.embd.only",
]
def _get_question_end_index(input_ids, sep_token_id):
sep_token_indices = (input_ids == sep_token_id).nonzero()
batch_size = input_ids.shape[0]
assert sep_token_indices.shape[1] == 2, "`input_ids` should have two dimensions"
assert (
sep_token_indices.shape[0] == 3 * batch_size
), f"There should be exactly three separator tokens: {sep_token_id} in every sample for questions answering. You might also consider to set `global_attention_mask` manually in the forward function to avoid this error."
return sep_token_indices.view(batch_size, 3, 2)[:, 0, 1]
def _compute_global_attention_mask(input_ids, sep_token_id, before_sep_token=True):
    """Build a uint8 mask marking positions that receive global attention.

    With before_sep_token=True, every position strictly before the first
    separator (i.e. the question) is marked; otherwise positions strictly
    after the first separator pair are marked instead.
    """
    q_end = _get_question_end_index(input_ids, sep_token_id)
    q_end = q_end.unsqueeze(dim=1)  # size: batch_size x 1
    # column index per position, broadcast to the input's shape
    positions = torch.arange(input_ids.shape[1], device=input_ids.device)
    positions = positions.expand_as(input_ids)
    if before_sep_token is True:
        return (positions < q_end).to(torch.uint8)
    # last token is separation token and should not be counted and in the middle are two separation tokens
    after_question = (positions > (q_end + 1)).to(torch.uint8)
    within_bounds = (positions < input_ids.shape[-1]).to(torch.uint8)
    return after_question * within_bounds
def create_position_ids_from_input_ids(input_ids, padding_idx):
    """Derive position ids so padding tokens keep the padding position.

    Non-pad tokens in each row are numbered padding_idx+1, padding_idx+2, ...;
    every pad token receives padding_idx itself (RoBERTa-style numbering).
    """
    is_real_token = input_ids.ne(padding_idx).int()
    running_count = torch.cumsum(is_real_token, dim=1).type_as(is_real_token)
    # zero out the padded slots, then shift everything past padding_idx
    positions = running_count * is_real_token
    return positions.long() + padding_idx
class LongformerEmbeddings(qc.Module):
    """Sum of word, position and token-type embeddings, then LayerNorm + drop.

    Position ids are derived from the input so that padding tokens keep the
    padding position (RoBERTa-style numbering via
    create_position_ids_from_input_ids).
    """

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = qc.Embed(config.s_vocab, config.d_model, padding_idx=config.PAD)
        self.token_type_embeddings = qc.Embed(config.n_typ, config.d_model)
        # self.norm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.n_pos).expand((1, -1)))
        self.pos_type = getattr(config, "pos_type", "absolute")
        self.padding_idx = config.PAD
        # Single definition: the original assigned position_embeddings twice
        # (once without padding_idx), allocating a table that was immediately
        # discarded. Only the padded version is kept.
        self.position_embeddings = qc.Embed(
            config.n_pos, config.d_model, padding_idx=self.padding_idx
        )

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        """Combine word, position and token-type embeddings.

        Exactly one of input_ids / inputs_embeds must be provided; omitted
        position_ids and token_type_ids are derived from the input.
        """
        if position_ids is None:
            if input_ids is not None:
                # Create the position ids from the input token ids. Any padded tokens remain padded.
                position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(
                    input_ids.device
                )
            else:
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        # NOTE: position_ids is guaranteed non-None at this point, so the
        # original fallback to the registered buffer was dead code and has
        # been removed.
        if token_type_ids is None:
            token_type_ids = torch.zeros(
                input_shape, dtype=torch.long, device=self.position_ids.device
            )
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
        embeddings = self.norm(embeddings)
        embeddings = self.drop(embeddings)
        return embeddings

    def create_position_ids_from_inputs_embeds(self, inputs_embeds):
        """Build sequential position ids starting past padding_idx.

        Used when only embeddings (no token ids) are supplied, so padding
        cannot be detected; all positions are treated as real tokens.
        """
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]
        position_ids = torch.arange(
            self.padding_idx + 1,
            sequence_length + self.padding_idx + 1,
            dtype=torch.long,
            device=inputs_embeds.device,
        )
        return position_ids.unsqueeze(0).expand(input_shape)
class LongformerSelfAttention(qc.Module):
def __init__(self, config, layer_id):
    """Set up local (sliding-window) and global attention projections.

    Raises ValueError when d_model is not divisible by n_heads, or when
    this layer's attention_window is not a positive even number.
    """
    super().__init__()
    if config.d_model % config.n_heads != 0:
        raise ValueError(
            f"The hidden size ({config.d_model}) is not a multiple of the number of attention "
            f"heads ({config.n_heads})"
        )
    self.n_heads = config.n_heads
    self.head_dim = int(config.d_model / config.n_heads)
    self.embed_dim = config.d_model
    self.query = qc.Linear(config.d_model, self.embed_dim)
    self.key = qc.Linear(config.d_model, self.embed_dim)
    self.value = qc.Linear(config.d_model, self.embed_dim)
    # separate projection layers for tokens with global attention
    self.query_global = qc.Linear(config.d_model, self.embed_dim)
    self.key_global = qc.Linear(config.d_model, self.embed_dim)
    self.value_global = qc.Linear(config.d_model, self.embed_dim)
    self.drop = config.drop_attn
    self.layer_id = layer_id
    attention_window = config.attention_window[self.layer_id]
    # Validate with raises rather than asserts so the checks survive
    # `python -O`, matching the ValueError raised above.
    if attention_window % 2 != 0:
        raise ValueError(
            f"`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}"
        )
    if attention_window <= 0:
        raise ValueError(
            f"`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}"
        )
    self.one_sided_attn_window_size = attention_window // 2
def forward(
    self,
    hiddens,
    attention_mask=None,
    layer_head_mask=None,
    is_index_masked=None,
    is_index_global_attn=None,
    is_global_attn=None,
    output_attentions=False,
):
    """Sliding-window local attention with optional global attention.

    `hiddens` is (batch, seq, embed_dim); the result tuple starts with the
    attention output in the same layout, followed by local attention probs
    when output_attentions is set, and global attention probs when global
    attention is also active.

    NOTE(review): attention_mask is dereferenced unconditionally below,
    so despite the None default, callers are assumed to always pass it —
    confirm against the encoder layer that invokes this.

    Fix vs. original: F.drop does not exist in torch.nn.functional; the
    drop call below now uses F.dropout.
    """
    hiddens = hiddens.transpose(0, 1)
    # project hidden states
    query_vectors = self.query(hiddens)
    key_vectors = self.key(hiddens)
    value_vectors = self.value(hiddens)
    seq_len, batch_size, embed_dim = hiddens.size()
    assert (
        embed_dim == self.embed_dim
    ), f"hiddens should have embed_dim = {self.embed_dim}, but has {embed_dim}"
    # normalize query
    query_vectors /= math.sqrt(self.head_dim)
    query_vectors = query_vectors.view(
        seq_len, batch_size, self.n_heads, self.head_dim
    ).transpose(0, 1)
    key_vectors = key_vectors.view(seq_len, batch_size, self.n_heads, self.head_dim).transpose(
        0, 1
    )
    attn_scores = self._sliding_chunks_query_key_matmul(
        query_vectors, key_vectors, self.one_sided_attn_window_size
    )
    # values to pad for attention probs
    remove_from_windowed_attention_mask = (attention_mask != 0)[:, :, None, None]
    # cast to fp32/fp16 then replace 1's with -inf
    float_mask = remove_from_windowed_attention_mask.type_as(query_vectors).masked_fill(
        remove_from_windowed_attention_mask, -10000.0
    )
    # diagonal mask with zeros everywhere and -inf inplace of padding
    diagonal_mask = self._sliding_chunks_query_key_matmul(
        float_mask.new_ones(size=float_mask.size()), float_mask, self.one_sided_attn_window_size
    )
    # pad local attention probs
    attn_scores += diagonal_mask
    assert list(attn_scores.size()) == [
        batch_size,
        seq_len,
        self.n_heads,
        self.one_sided_attn_window_size * 2 + 1,
    ], f"local_attn_probs should be of size ({batch_size}, {seq_len}, {self.n_heads}, {self.one_sided_attn_window_size * 2 + 1}), but is of size {attn_scores.size()}"
    # compute local attention probs from global attention keys and contact over window dim
    if is_global_attn:
        # compute global attn indices required through out forward fn
        (
            max_num_global_attn_indices,
            is_index_global_attn_nonzero,
            is_local_index_global_attn_nonzero,
            is_local_index_no_global_attn_nonzero,
        ) = self._get_global_attn_indices(is_index_global_attn)
        # calculate global attn probs from global key
        global_key_attn_scores = self._concat_with_global_key_attn_probs(
            query_vectors=query_vectors,
            key_vectors=key_vectors,
            max_num_global_attn_indices=max_num_global_attn_indices,
            is_index_global_attn_nonzero=is_index_global_attn_nonzero,
            is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
            is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
        )
        # concat to local_attn_probs
        # (batch_size, seq_len, n_heads, extra attention count + 2*window+1)
        attn_scores = torch.cat((global_key_attn_scores, attn_scores), dim=-1)
        # free memory
        del global_key_attn_scores
    attn_probs = F.softmax(
        attn_scores, dim=-1, dtype=torch.float32
    )  # use fp32 for numerical stability
    if layer_head_mask is not None:
        assert layer_head_mask.size() == (
            self.n_heads,
        ), f"Head mask for a single layer should be of size {(self.n_heads,)}, but is {layer_head_mask.size()}"
        attn_probs = layer_head_mask.view(1, 1, -1, 1) * attn_probs
    # softmax sometimes inserts NaN if all positions are masked, replace them with 0
    attn_probs = torch.masked_fill(attn_probs, is_index_masked[:, :, None, None], 0.0)
    attn_probs = attn_probs.type_as(attn_scores)
    # free memory
    del attn_scores
    # apply drop (BUGFIX: was F.drop, which does not exist in torch.nn.functional)
    attn_probs = F.dropout(attn_probs, p=self.drop, training=self.training)
    value_vectors = value_vectors.view(
        seq_len, batch_size, self.n_heads, self.head_dim
    ).transpose(0, 1)
    # compute local attention output with global attention value and add
    if is_global_attn:
        # compute sum of global and local attn
        attn_output = self._compute_attn_output_with_global_indices(
            value_vectors=value_vectors,
            attn_probs=attn_probs,
            max_num_global_attn_indices=max_num_global_attn_indices,
            is_index_global_attn_nonzero=is_index_global_attn_nonzero,
            is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
        )
    else:
        # compute local attn only
        attn_output = self._sliding_chunks_matmul_attn_probs_value(
            attn_probs, value_vectors, self.one_sided_attn_window_size
        )
    assert attn_output.size() == (
        batch_size,
        seq_len,
        self.n_heads,
        self.head_dim,
    ), "Unexpected size"
    attn_output = (
        attn_output.transpose(0, 1).reshape(seq_len, batch_size, embed_dim).contiguous()
    )
    # compute value for global attention and overwrite to attention output
    # TODO: remove the redundant computation
    if is_global_attn:
        global_attn_output, global_attn_probs = self._compute_global_attn_output_from_hidden(
            hiddens=hiddens,
            max_num_global_attn_indices=max_num_global_attn_indices,
            layer_head_mask=layer_head_mask,
            is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
            is_index_global_attn_nonzero=is_index_global_attn_nonzero,
            is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
            is_index_masked=is_index_masked,
        )
        # get only non zero global attn output
        nonzero_global_attn_output = global_attn_output[
            is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1]
        ]
        # overwrite values with global attention
        attn_output[is_index_global_attn_nonzero[::-1]] = nonzero_global_attn_output.view(
            len(is_local_index_global_attn_nonzero[0]), -1
        )
        # The attention weights for tokens with global attention are
        # just filler values, they were never used to compute the output.
        # Fill with 0 now, the correct values are in 'global_attn_probs'.
        attn_probs[is_index_global_attn_nonzero] = 0
    outputs = (attn_output.transpose(0, 1),)
    if output_attentions:
        outputs += (attn_probs,)
    return outputs + (global_attn_probs,) if (is_global_attn and output_attentions) else outputs
@staticmethod
def _pad_and_transpose_last_two_dims(hidden_states_padded, padding):
"""pads rows and then flips rows and columns"""
hidden_states_padded = F.pad(
hidden_states_padded, padding
) # padding value is not important because it will be overwritten
hidden_states_padded = hidden_states_padded.view(
*hidden_states_padded.size()[:-2],
hidden_states_padded.size(-1),
hidden_states_padded.size(-2),
)
return hidden_states_padded
@staticmethod
def _pad_and_diagonalize(chunked_model_states):
total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_model_states.size()
chunked_model_states = F.pad(
chunked_model_states, (0, window_overlap + 1)
) # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). Padding value is not important because it'll be overwritten
chunked_model_states = chunked_model_states.view(
total_num_heads, num_chunks, -1
) # total_num_heads x num_chunks x window_overlap*window_overlap+window_overlap
chunked_model_states = chunked_model_states[
:, :, :-window_overlap
] # total_num_heads x num_chunks x window_overlap*window_overlap
chunked_model_states = chunked_model_states.view(
total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim
)
chunked_model_states = chunked_model_states[:, :, :, :-1]
return chunked_model_states
@staticmethod
def _chunk(hiddens, window_overlap):
hiddens = hiddens.view(
hiddens.size(0),
hiddens.size(1) // (window_overlap * 2),
window_overlap * 2,
hiddens.size(2),
)
chunk_size = list(hiddens.size())
chunk_size[1] = chunk_size[1] * 2 - 1
chunk_stride = list(hiddens.stride())
chunk_stride[1] = chunk_stride[1] // 2
return hiddens.as_strided(size=chunk_size, stride=chunk_stride)
@staticmethod
def _mask_invalid_locations(input_tensor, affected_seq_len):
beginning_mask_2d = (
input_tensor.new_ones(affected_seq_len, affected_seq_len + 1).tril().flip(dims=[0])
)
beginning_mask = beginning_mask_2d[None, :, None, :]
ending_mask = beginning_mask.flip(dims=(1, 3))
beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1]
beginning_mask = beginning_mask.expand(beginning_input.size())
beginning_input.masked_fill_(
beginning_mask == 1, -float("inf")
) # `== 1` converts to bool or uint8
ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :]
ending_mask = ending_mask.expand(ending_input.size())
ending_input.masked_fill_(
ending_mask == 1, -float("inf")
) # `== 1` converts to bool or uint8
def _sliding_chunks_query_key_matmul(self, query, key, window_overlap):
    """Sliding-window attention scores between `query` and `key`.

    Both inputs are (batch_size, seq_len, n_heads, head_dim); the result is
    (batch_size, seq_len, n_heads, 2 * window_overlap + 1): every query is
    scored against `window_overlap` keys on each side plus itself.
    """
    batch_size, seq_len, n_heads, head_dim = query.size()
    assert (
        seq_len % (window_overlap * 2) == 0
    ), f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}"
    assert query.size() == key.size()
    chunks_count = seq_len // window_overlap - 1
    # group batch_size and n_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2
    query = query.transpose(1, 2).reshape(batch_size * n_heads, seq_len, head_dim)
    key = key.transpose(1, 2).reshape(batch_size * n_heads, seq_len, head_dim)
    query = self._chunk(query, window_overlap)
    key = self._chunk(key, window_overlap)
    # scores within each overlapping chunk
    diagonal_chunked_attention_scores = torch.einsum(
        "bcxd,bcyd->bcxy", (query, key)
    )  # multiply
    # convert diagonals into columns
    diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(
        diagonal_chunked_attention_scores, padding=(0, 0, 0, 1)
    )
    diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty(
        (batch_size * n_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1)
    )
    # copy parts from diagonal_chunked_attention_scores into the combined matrix of attns
    # - copying the main diagonal and the upper triangle
    diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[
        :, :, :window_overlap, : window_overlap + 1
    ]
    diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[
        :, -1, window_overlap:, : window_overlap + 1
    ]
    # - copying the lower triangle
    diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[
        :, :, -(window_overlap + 1) : -1, window_overlap + 1 :
    ]
    diagonal_attention_scores[
        :, 0, 1:window_overlap, 1:window_overlap
    ] = diagonal_chunked_attention_scores[:, 0, : window_overlap - 1, 1 - window_overlap :]
    # separate batch_size and n_heads dimensions again
    diagonal_attention_scores = diagonal_attention_scores.view(
        batch_size, n_heads, seq_len, 2 * window_overlap + 1
    ).transpose(2, 1)
    # window slots that fall outside the sequence are filled with -inf
    self._mask_invalid_locations(diagonal_attention_scores, window_overlap)
    return diagonal_attention_scores
def _sliding_chunks_matmul_attn_probs_value(self, attn_probs, value, window_overlap):
    """Apply sliding-window attention probabilities to `value`.

    Companion of `_sliding_chunks_query_key_matmul`: `attn_probs` is
    (batch_size, seq_len, n_heads, 2 * window_overlap + 1), `value` is
    (batch_size, seq_len, n_heads, head_dim); returns the same shape as `value`.
    """
    batch_size, seq_len, n_heads, head_dim = value.size()
    assert seq_len % (window_overlap * 2) == 0
    assert attn_probs.size()[:3] == value.size()[:3]
    assert attn_probs.size(3) == 2 * window_overlap + 1
    chunks_count = seq_len // window_overlap - 1
    # group batch_size and n_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap
    chunked_attn_probs = attn_probs.transpose(1, 2).reshape(
        batch_size * n_heads,
        seq_len // window_overlap,
        window_overlap,
        2 * window_overlap + 1,
    )
    # group batch_size and n_heads dimensions into one
    value = value.transpose(1, 2).reshape(batch_size * n_heads, seq_len, head_dim)
    # pad seq_len with w at the beginning of the sequence and another window overlap at the end
    # (pad value -1 is arbitrary — presumably never selected by valid probs; TODO confirm)
    padded_value = F.pad(value, (0, 0, window_overlap, window_overlap), value=-1)
    # chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap
    chunked_value_size = (
        batch_size * n_heads,
        chunks_count + 1,
        3 * window_overlap,
        head_dim,
    )
    chunked_value_stride = padded_value.stride()
    chunked_value_stride = (
        chunked_value_stride[0],
        window_overlap * chunked_value_stride[1],
        chunked_value_stride[1],
        chunked_value_stride[2],
    )
    chunked_value = padded_value.as_strided(
        size=chunked_value_size, stride=chunked_value_stride
    )
    # skew the probability rows so they align with the overlapping value chunks
    chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs)
    context = torch.einsum("bcwd,bcdh->bcwh", (chunked_attn_probs, chunked_value))
    return context.view(batch_size, n_heads, seq_len, head_dim).transpose(1, 2)
@staticmethod
def _get_global_attn_indices(is_index_global_attn):
num_global_attn_indices = is_index_global_attn.long().sum(dim=1)
# max number of global attn indices in batch
max_num_global_attn_indices = num_global_attn_indices.max()
# indices of global attn
is_index_global_attn_nonzero = is_index_global_attn.nonzero(as_tuple=True)
# helper variable
is_local_index_global_attn = torch.arange(
max_num_global_attn_indices, device=is_index_global_attn.device
) < num_global_attn_indices.unsqueeze(dim=-1)
# location of the non-padding values within global attention indices
is_local_index_global_attn_nonzero = is_local_index_global_attn.nonzero(as_tuple=True)
# location of the padding values within global attention indices
is_local_index_no_global_attn_nonzero = (is_local_index_global_attn == 0).nonzero(
as_tuple=True
)
return (
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
)
def _concat_with_global_key_attn_probs(
self,
key_vectors,
query_vectors,
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
):
batch_size = key_vectors.shape[0]
# create only global key vectors
key_vectors_only_global = key_vectors.new_zeros(
batch_size, max_num_global_attn_indices, self.n_heads, self.head_dim
)
key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[
is_index_global_attn_nonzero
]
# (batch_size, seq_len, n_heads, max_num_global_attn_indices)
attn_probs_from_global_key = torch.einsum(
"blhd,bshd->blhs", (query_vectors, key_vectors_only_global)
)
attn_probs_from_global_key[
is_local_index_no_global_attn_nonzero[0], :, :, is_local_index_no_global_attn_nonzero[1]
] = -10000.0
return attn_probs_from_global_key
def _compute_attn_output_with_global_indices(
    self,
    value_vectors,
    attn_probs,
    max_num_global_attn_indices,
    is_index_global_attn_nonzero,
    is_local_index_global_attn_nonzero,
):
    """Sum of attention over global tokens and local sliding-window attention.

    `attn_probs` lays out the global-token columns first (width
    `max_num_global_attn_indices`), then the local window columns.
    """
    batch_size = attn_probs.shape[0]
    # cut local attn probs to global only
    attn_probs_only_global = attn_probs.narrow(-1, 0, max_num_global_attn_indices)
    # get value vectors for global only
    value_vectors_only_global = value_vectors.new_zeros(
        batch_size, max_num_global_attn_indices, self.n_heads, self.head_dim
    )
    value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[
        is_index_global_attn_nonzero
    ]
    # use `matmul` because `einsum` crashes sometimes with fp16
    # attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v))
    # compute attn output only global
    attn_output_only_global = torch.matmul(
        attn_probs_only_global.transpose(1, 2).clone(),
        value_vectors_only_global.transpose(1, 2).clone(),
    ).transpose(1, 2)
    # reshape attn probs: drop the global columns, keep the window columns
    attn_probs_without_global = attn_probs.narrow(
        -1, max_num_global_attn_indices, attn_probs.size(-1) - max_num_global_attn_indices
    ).contiguous()
    # compute attn output with global
    attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value(
        attn_probs_without_global, value_vectors, self.one_sided_attn_window_size
    )
    return attn_output_only_global + attn_output_without_global
def _compute_global_attn_output_from_hidden(
self,
hiddens,
max_num_global_attn_indices,
layer_head_mask,
is_local_index_global_attn_nonzero,
is_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
is_index_masked,
):
seq_len, batch_size = hiddens.shape[:2]
# prepare global hidden states
global_attn_hidden_states = hiddens.new_zeros(
max_num_global_attn_indices, batch_size, self.embed_dim
)
global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hiddens[
is_index_global_attn_nonzero[::-1]
]
# global key, query, value
global_query_vectors_only_global = self.query_global(global_attn_hidden_states)
global_key_vectors = self.key_global(hiddens)
global_value_vectors = self.value_global(hiddens)
# normalize
global_query_vectors_only_global /= math.sqrt(self.head_dim)
# reshape
global_query_vectors_only_global = (
global_query_vectors_only_global.contiguous()
.view(max_num_global_attn_indices, batch_size * self.n_heads, self.head_dim)
.transpose(0, 1)
) # (batch_size * self.n_heads, max_num_global_attn_indices, head_dim)
global_key_vectors = (
global_key_vectors.contiguous()
.view(-1, batch_size * self.n_heads, self.head_dim)
.transpose(0, 1)
) # batch_size * self.n_heads, seq_len, head_dim)
global_value_vectors = (
global_value_vectors.contiguous()
.view(-1, batch_size * self.n_heads, self.head_dim)
.transpose(0, 1)
) # batch_size * self.n_heads, seq_len, head_dim)
# compute attn scores
global_attn_scores = torch.bmm(
global_query_vectors_only_global, global_key_vectors.transpose(1, 2)
)
assert list(global_attn_scores.size()) == [
batch_size * self.n_heads,
max_num_global_attn_indices,
seq_len,
], f"global_attn_scores have the wrong size. Size should be {(batch_size * self.n_heads, max_num_global_attn_indices, seq_len)}, but is {global_attn_scores.size()}."
global_attn_scores = global_attn_scores.view(
batch_size, self.n_heads, max_num_global_attn_indices, seq_len
)
global_attn_scores[
is_local_index_no_global_attn_nonzero[0], :, is_local_index_no_global_attn_nonzero[1], :
] = -10000.0
global_attn_scores = global_attn_scores.masked_fill(
is_index_masked[:, None, None, :],
-10000.0,
)
global_attn_scores = global_attn_scores.view(
batch_size * self.n_heads, max_num_global_attn_indices, seq_len
)
# compute global attn probs
global_attn_probs_float = F.softmax(
global_attn_scores, dim=-1, dtype=torch.float32
) # use fp32 for numerical stability
# apply layer head masking
if layer_head_mask is not None:
assert layer_head_mask.size() == (
self.n_heads,
), f"Head mask for a single layer should be of size {(self.n_heads,)}, but is {layer_head_mask.size()}"
global_attn_probs_float = layer_head_mask.view(
1, -1, 1, 1
) * global_attn_probs_float.view(
batch_size, self.n_heads, max_num_global_attn_indices, seq_len
)
global_attn_probs_float = global_attn_probs_float.view(
batch_size * self.n_heads, max_num_global_attn_indices, seq_len
)
global_attn_probs = F.drop(
global_attn_probs_float.type_as(global_attn_scores),
p=self.drop,
training=self.training,
)
# global attn output
global_attn_output = torch.bmm(global_attn_probs, global_value_vectors)
assert list(global_attn_output.size()) == [
batch_size * self.n_heads,
max_num_global_attn_indices,
self.head_dim,
], f"global_attn_output tensor has the wrong size. Size should be {(batch_size * self.n_heads, max_num_global_attn_indices, self.head_dim)}, but is {global_attn_output.size()}."
global_attn_probs = global_attn_probs.view(
batch_size, self.n_heads, max_num_global_attn_indices, seq_len
)
global_attn_output = global_attn_output.view(
batch_size, self.n_heads, max_num_global_attn_indices, self.head_dim
)
return global_attn_output, global_attn_probs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class LongformerSelfOutput(qc.Module):
    """Post-attention projection: dense -> dropout -> residual layer-norm."""

    def __init__(self, config):
        super().__init__()
        self.dense = qc.Linear(config.d_model, config.d_model)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)

    def forward(self, hiddens, input_tensor):
        # project and regularize, then add the residual before normalizing
        projected = self.drop(self.dense(hiddens))
        return self.norm(projected + input_tensor)
class Attention(qc.Module):
    """Attention sub-layer: Longformer self-attention + output projection."""

    def __init__(self, config, layer_id=0):
        super().__init__()
        self.self = LongformerSelfAttention(config, layer_id)
        self.output = LongformerSelfOutput(config)

    def forward(
        self,
        hiddens,
        attention_mask=None,
        layer_head_mask=None,
        is_index_masked=None,
        is_index_global_attn=None,
        is_global_attn=None,
        output_attentions=False,
    ):
        # run attention, then project with residual; pass attention maps through
        ys = self.self(
            hiddens,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            is_index_masked=is_index_masked,
            is_index_global_attn=is_index_global_attn,
            is_global_attn=is_global_attn,
            output_attentions=output_attentions,
        )
        return (self.output(ys[0], hiddens),) + ys[1:]
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class LongformerIntermediate(qc.Module):
    """Feed-forward expansion: linear d_model -> d_ff followed by activation."""

    def __init__(self, cfg):
        super().__init__()
        self.dense = qc.Linear(cfg.d_model, cfg.d_ff)
        self.act = qu.activation(cfg.act)

    def forward(self, x):
        return self.act(self.dense(x))
# Copied from transformers.models.bert.modeling_bert.BertOutput
class LongformerOutput(qc.Module):
    """Feed-forward contraction: linear d_ff -> d_model, dropout, residual norm."""

    def __init__(self, config):
        super().__init__()
        self.dense = qc.Linear(config.d_ff, config.d_model)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)

    def forward(self, hiddens, input_tensor):
        # contract and regularize, then add the residual before normalizing
        contracted = self.drop(self.dense(hiddens))
        return self.norm(contracted + input_tensor)
class Layer(qc.Module):
    """One transformer layer: attention sub-layer + chunked feed-forward block."""

    def __init__(self, config, layer_id=0):
        super().__init__()
        self.attention = Attention(config, layer_id)
        self.intermediate = LongformerIntermediate(config)
        self.output = LongformerOutput(config)
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1

    def forward(
        self,
        hiddens,
        attention_mask=None,
        layer_head_mask=None,
        is_index_masked=None,
        is_index_global_attn=None,
        is_global_attn=None,
        output_attentions=False,
    ):
        attn = self.attention(
            hiddens,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            is_index_masked=is_index_masked,
            is_index_global_attn=is_index_global_attn,
            is_global_attn=is_global_attn,
            output_attentions=output_attentions,
        )
        # feed-forward is applied in chunks along the sequence dim to bound memory
        y = apply_chunking_to_forward(
            self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attn[0]
        )
        return (y,) + attn[1:]

    def ff_chunk(self, attn_output):
        # expansion + contraction with residual norm
        return self.output(self.intermediate(attn_output), attn_output)
class Encoder(qc.Module):
    """Stack of Longformer `Layer`s with optional gradient checkpointing.

    `attention_mask` is tri-state per token: negative = masked/padding,
    zero = local attention, positive = global attention (see
    `Model._merge_to_attention_mask`).
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([Layer(config, layer_id=i) for i in range(config.n_lays)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        padding_len=0,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        # decode the tri-state attention mask (see class docstring)
        is_index_masked = attention_mask < 0
        is_index_global_attn = attention_mask > 0
        is_global_attn = is_index_global_attn.flatten().any().item()
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None  # All local attns.
        all_global_attentions = () if (output_attentions and is_global_attn) else None
        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            assert head_mask.size()[0] == (
                len(self.layer)
            ), f"The head_mask should be specified for {len(self.layer)} layers, but it is for {head_mask.size()[0]}."
        for idx, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hiddens,)
            if self.gradient_checkpointing and self.training:
                # checkpointing: recompute the layer in backward instead of
                # storing its activations
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, is_global_attn, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hiddens,
                    attention_mask,
                    head_mask[idx] if head_mask is not None else None,
                    is_index_masked,
                    is_index_global_attn,
                )
            else:
                layer_outputs = layer_module(
                    hiddens,
                    attention_mask=attention_mask,
                    layer_head_mask=head_mask[idx] if head_mask is not None else None,
                    is_index_masked=is_index_masked,
                    is_index_global_attn=is_index_global_attn,
                    is_global_attn=is_global_attn,
                    output_attentions=output_attentions,
                )
            hiddens = layer_outputs[0]
            if output_attentions:
                # bzs x seq_len x num_attn_heads x (num_global_attn + attention_window_len + 1) => bzs x num_attn_heads x seq_len x (num_global_attn + attention_window_len + 1)
                all_attentions = all_attentions + (layer_outputs[1].transpose(1, 2),)
                if is_global_attn:
                    # bzs x num_attn_heads x num_global_attn x seq_len => bzs x num_attn_heads x seq_len x num_global_attn
                    all_global_attentions = all_global_attentions + (
                        layer_outputs[2].transpose(2, 3),
                    )
        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hiddens,)
        # undo padding
        if padding_len > 0:
            # unpad `hiddens` because the calling function is expecting a length == input_ids.size(1)
            hiddens = hiddens[:, :-padding_len]
            if output_hidden_states:
                all_hidden_states = tuple([state[:, :-padding_len] for state in all_hidden_states])
            if output_attentions:
                all_attentions = tuple([state[:, :, :-padding_len, :] for state in all_attentions])
        if not return_dict:
            return tuple(
                v
                for v in [hiddens, all_hidden_states, all_attentions, all_global_attentions]
                if v is not None
            )
        return qo.Base(
            y=hiddens,
            hiddens=all_hidden_states,
            attns=all_attentions,
            globals=all_global_attentions,
        )
class Model(PreTrained):
    """Base Longformer encoder (embeddings + Encoder + optional pooler).

    Inputs are right-padded to a multiple of the attention window, and the
    tri-state mask consumed by `Encoder` is produced by merging
    `attention_mask` with `global_attention_mask`.

    NOTE(review): subclasses in this file call `Model(add_pool=False, **kw)`
    while this constructor is `(config, add_pooling_layer=True)` — kwarg
    name/shape mismatch; confirm which signature is intended.
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        if isinstance(config.attention_window, int):
            assert config.attention_window % 2 == 0
            assert config.attention_window > 0
            config.attention_window = [
                config.attention_window
            ] * config.n_lays  # one value per layer
        else:
            assert len(config.attention_window) == config.n_lays
        self.embeddings = LongformerEmbeddings(config)
        self.encoder = Encoder(config)
        self.pool = Pool(config) if add_pooling_layer else None

    def _pad_to_window_size(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        position_ids,
        inputs_embeds,
        PAD,
    ):
        """Right-pad all inputs so seq_len is a multiple of the largest window."""
        attention_window = (
            self.config.attention_window
            if isinstance(self.config.attention_window, int)
            else max(self.config.attention_window)
        )
        assert attention_window % 2 == 0
        input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape
        batch_size, seq_len = input_shape[:2]
        padding_len = (attention_window - seq_len % attention_window) % attention_window
        if padding_len > 0:
            log.info(
                f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
                f"`config.attention_window`: {attention_window}"
            )
            if input_ids is not None:
                input_ids = F.pad(input_ids, (0, padding_len), value=PAD)
            if position_ids is not None:
                # pad with position_id = PAD as in modeling_roberta.RobertaEmbeddings
                position_ids = F.pad(position_ids, (0, padding_len), value=PAD)
            if inputs_embeds is not None:
                # embed PAD tokens and append their embeddings
                input_ids_padding = inputs_embeds.new_full(
                    (batch_size, padding_len),
                    self.config.PAD,
                    dtype=torch.long,
                )
                inputs_embeds_padding = self.embeddings(input_ids_padding)
                inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2)
            attention_mask = F.pad(
                attention_mask, (0, padding_len), value=False
            )  # no attention on the padding tokens
            token_type_ids = F.pad(
                token_type_ids, (0, padding_len), value=0
            )  # pad with token_type_id = 0
        return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds

    def _merge_to_attention_mask(self, attention_mask, global_attention_mask):
        # global_attention_mask is 0/1; (mask) * (global + 1) yields the
        # tri-state mask used by Encoder (0 masked, 1 local, 2 global)
        if attention_mask is not None:
            attention_mask = attention_mask * (global_attention_mask + 1)
        else:
            attention_mask = global_attention_mask + 1
        return attention_mask

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        global_attention_mask=None,
        head_mask=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # merge `global_attention_mask` and `attention_mask`
        if global_attention_mask is not None:
            attention_mask = self._merge_to_attention_mask(attention_mask, global_attention_mask)
        (
            padding_len,
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            inputs_embeds,
        ) = self._pad_to_window_size(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            PAD=self.config.PAD,
        )
        # keep only the (batch, seq) slice — the encoder expects a 2-D mask
        extended_attention_mask = self.get_extended_attention_mask(
            attention_mask, input_shape, device
        )[:, 0, 0, :]
        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            padding_len=padding_len,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pool(sequence_output) if self.pool is not None else None
        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return qo.WithPools(
            y=sequence_output,
            pools=pooled_output,
            hiddens=encoder_outputs.hiddens,
            attns=encoder_outputs.attns,
            globals=encoder_outputs.globals,
        )
class ForMasked(PreTrained):
    """Masked-LM head on top of the base `Model`.

    NOTE(review): `Model.__init__` in this file is `(config,
    add_pooling_layer=True)` but is called here as `Model(add_pool=False,
    **kw)` — kwarg name mismatch; confirm intended constructor signature.
    """

    def __init__(self, **kw):
        super().__init__(**kw)
        self.get_cfg(kw)
        self.model = Model(add_pool=False, **kw)
        self.proj = Predictor(**kw)

    # loss/logit computation is delegated to the shared qf helper
    forward = qf.forward_masked
class ForSeqClass(PreTrained):
    """Sequence-classification head; the first token defaults to global attention."""

    def __init__(self, **kw):
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        self.model = Model(add_pool=False, **kw)
        self.proj = Classifier(cfg.d_model, "tanh", **kw)

    def forward(self, x, g_mask=None, **kw):
        # default global-attention mask: only the leading (CLS-like) token
        if g_mask is None:
            g_mask = torch.zeros_like(x)
            g_mask[:, 0] = 1
        # NOTE(review): qf.forward_seq is called without `self` — verify its
        # signature against the helpers bound as unbound methods elsewhere.
        return qf.forward_seq(x, g_mask=g_mask, **kw)
class ForTokClass(PreTrained):
    """Token-classification head on top of the base `Model`.

    NOTE(review): passes `add_pool=` to `Model`, whose visible signature is
    `add_pooling_layer=` — confirm intended constructor signature.
    """

    def __init__(self, **kw):
        super().__init__(**kw)
        self.get_cfg(kw)
        self.model = Model(add_pool=False, **kw)
        self.proj = Classifier(**kw)

    # loss/logit computation is delegated to the shared qf helper
    forward = qf.forward_tok
class ForQA(PreTrained):
    """Span-prediction (QA) head; question tokens receive global attention."""

    def __init__(self, **kw):
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        self.model = Model(add_pool=False, **kw)
        self.proj = qc.Linear(cfg.d_model, cfg.n_labels, **kw)

    def forward(self, x, globals=None, **kw):
        if globals is None:
            assert x is not None
            # presumably: global attention on all tokens up to the first SEP
            # (the question) — verify against _compute_global_attention_mask
            globals = _compute_global_attention_mask(x, self.cfg.sep_token_id)
        # NOTE(review): qf.forward_qa is called without `self`; verify signature
        return qf.forward_qa(x, globals=globals, **kw)
class ForChoice(PreTrained):
    """Multiple-choice head: score each of the flattened choices with a
    shared linear layer and cross-entropy over the choice dimension."""

    def __init__(self, config):
        super().__init__(config)
        self.longformer = Model(config)
        self.drop = qc.Dropout(config.drop)
        self.classifier = qc.Linear(config.d_model, 1)

    def forward(
        self,
        input_ids=None,
        token_type_ids=None,
        attention_mask=None,
        global_attention_mask=None,
        head_mask=None,
        labels=None,
        position_ids=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if global_attention_mask is None and input_ids is not None:
            log.info("Initializing global attention on multiple choice...")
            # one global-attention mask per choice, stacked along dim 1
            global_attention_mask = torch.stack(
                [
                    _compute_global_attention_mask(
                        input_ids[:, i], self.config.sep_token_id, before_sep_token=False
                    )
                    for i in range(num_choices)
                ],
                dim=1,
            )
        # flatten (batch, choices, ...) -> (batch * choices, ...) for the encoder
        flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        flat_position_ids = (
            position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        )
        flat_token_type_ids = (
            token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        )
        flat_attention_mask = (
            attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        )
        flat_global_attention_mask = (
            global_attention_mask.view(-1, global_attention_mask.size(-1))
            if global_attention_mask is not None
            else None
        )
        flat_inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        outputs = self.longformer(
            flat_input_ids,
            position_ids=flat_position_ids,
            token_type_ids=flat_token_type_ids,
            attention_mask=flat_attention_mask,
            global_attention_mask=flat_global_attention_mask,
            head_mask=head_mask,
            inputs_embeds=flat_inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # score each choice from the pooled output, then regroup per example
        pooled_output = outputs[1]
        pooled_output = self.drop(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return qo.WithLoss(
            loss=loss,
            logits=reshaped_logits,
            hiddens=outputs.hiddens,
            attns=outputs.attns,
            globals=outputs.globals,
        )
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,541
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/config/gpt.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from ... import core as qc
class PreTrained(qc.PreTrained):
    """Configuration base for the OpenAI GPT ("openai-gpt") model family.

    ``hs`` declares the hyperparameter schema: the first argument is the set
    of optional keys ({"act_sum"}), the second the known keys with their
    default values.  ``_init_weights`` applies the standard GPT weight
    initialization to a freshly constructed module.
    """

    hs = qc.Hypers(
        {"act_sum"},
        {
            "act": "gelu",
            "drop_attn": 0.1,
            "drop_embed": 0.1,
            "drop_sum_first": 0.1,
            "drop": 0.1,
            "init_range": 0.02,
            "model_type": "openai-gpt",
            "n_ctx": 512,
            "n_embed": 768,
            "n_heads": 12,
            "n_lays": 12,
            "n_pos": 512,
            "eps": 1e-5,
            "predict_special_tokens": True,
            "s_vocab": 40478,
            "sum_proj": True,
            "sum_type": "cls_index",
            "sum_use_proj": True,
        },
    )

    def _init_weights(self, module):
        """Initialize ``module`` in place per GPT conventions.

        Dense (Linear/Conv1D) and Embedding weights are drawn from a normal
        distribution with std ``cfg.init_range``; biases and padding rows are
        zeroed; LayerNorm starts as the identity transform.
        """
        std = self.cfg.init_range
        if isinstance(module, (qc.Linear, qc.Conv1D)):
            module.weight.data.normal_(mean=0.0, std=std)
            bias = module.bias
            if bias is not None:
                bias.data.zero_()
            return
        if isinstance(module, qc.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            pad = module.padding_idx
            if pad is not None:
                # The padding row must stay at zero so it contributes nothing.
                module.weight.data[pad].zero_()
            return
        if isinstance(module, qc.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
# Per-checkpoint configuration overrides, keyed by pretrained model name.
MAP = {
    "openai-gpt": {
        "archs": ["LMHead"],
        "n_special": 0,
        "task_params": {"text-generation": {"do_sample": True, "max_len": 50}},
    }
}
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,542
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/base/doc/rectify.py
|
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import re
import codecs
import datetime as dt
import collections as col
def _handler(err):
c_map = {
b'\x87': '',
b'\xa3': '',
b'\xa5': '',
b'\xb4': '',
b'\xb5': '',
b'\xbc': '',
b'\xc1': '',
b'\xc7': '',
b'\xc9': '',
b'\xd2': '"',
b'\xd3': '"',
b'\xd4': "'",
b'\xd5': "'",
b'\xde': 'fi',
b'\xdf': 'fl',
b'\xe1': '',
}
k = err.object[err.start:err.end]
# print('***', k, k.hex())
if k in c_map:
# print('replacing {} with {}'.format(k, c_map[k]))
return c_map[k], err.end
print(err.object[err.start - 20:err.end + 20])
raise err
QNERR = 'qnerr'
codecs.register_error(QNERR, _handler)
# Inline regex flags: ASCII-only matching, ignore case, multi-line.
flags = r'(?aim)'

# Alternation of the full English month names.
mos = '|'.join((
    'January', 'February', 'March', 'April', 'May', 'June', 'July',
    'August', 'September', 'October', 'November', 'December',
))

# Matches "March 5, 2014" with an optional trailing comma; the date itself
# is captured (so re.split keeps it) while the comma is consumed.
dp = re.compile(flags + r'(?P<dt>(?:' + mos + r') \d{1,2}, 20\d{2}),?')


def _reformat(part, in_fmt, out_fmt):
    """Re-render *part* through strptime/format; return it unchanged when it
    does not parse as a date."""
    try:
        return out_fmt.format(dt.datetime.strptime(part, in_fmt))
    except ValueError:
        return part


def days(txt):
    """Yield *txt* split around full dates, each date rewritten as yy-mm-dd."""
    for part in dp.split(txt):
        yield _reformat(part, '%B %d, %Y', '{0:%y-%m-%d}')


# Matches "March of 2014" style month references.
mp = re.compile(flags + r'(?P<mo>(?:' + mos + r') of 20\d{2})')


def months(txt):
    """Yield *txt* split around month references, each rewritten as YYYY-MM."""
    for part in mp.split(txt):
        yield _reformat(part, '%B of %Y', '{0:%Y-%m}')
# Ordered substitution table consumed (when enabled) by rectify(); the
# insertion order matters because later passes assume the replacements of
# earlier passes have already been applied.
s_map = col.OrderedDict()
# Pass 1: strip periods from honorifics and collapse institutional names
# to their customary abbreviations.
s_map.update((
('Dr.', 'Dr '),
('Mr.', 'Mr '),
('Ms.', 'Ms '),
('Ofc.', 'Ofc '),
('Atty.', 'Atty '),
('Guardian ad litem', 'GAL'),
('Guardian ad Litem', 'GAL'),
('Guardian Ad Litem', 'GAL'),
('Department of Children and Family', 'DCF'),
('Department of Children and Families', 'DCF'),
('Concord Police Officers', 'Police'),
('Concord Police officers', 'Police'),
('Concord police officers', 'Police'),
('police officers', 'Police'),
('Concord Police Department', 'Police'),
('Concord District Court', 'District Court'),
('New Hampshire', 'NH'),
('the Commonwealth of Massachusetts', 'MA'),
('Massachusetts', 'MA'),
('Middlesex Probate and Family Court', 'Family Court'),
('Middlesex Probate & Family Court', 'Family Court'),
('Middlesex Division of the Probate and Family Court', 'Family Court'),
))
# Pass 2: replace personal names and nicknames with anonymized role labels
# (longer, more specific variants listed before shorter ones).
s_map.update((
('The Father', 'Dad'),
('the Father', 'Dad'),
('Father', 'Dad'),
('Imre Kifor', 'Dad'),
('Imre', 'Dad'),
('Barbara A.', 'Mom-B'),
('Barbara A', 'Mom-B'),
('Barbara', 'Mom-B'),
('Duchesne', 'Mom-B'),
('Ms Mom-B', 'Mom-B'),
('his former girlfriend', 'Mom-C'),
('Cynthia S.', 'Mom-C'),
('Cynthia S', 'Mom-C'),
('Cynthia', 'Mom-C'),
('Cyndi', 'Mom-C'),
('Cindy', 'Mom-C'),
('Oulton', 'Mom-C'),
('Ms Mom-C', 'Mom-C'),
('Twins', 'Kids-B'),
('twins', 'Kids-B'),
('Evan Kifor', 'Leon'),
('Evan', 'Leon'),
('Anna Kifor', 'Lisa'),
('Anna', 'Lisa'),
('Blake', 'Luke'),
('Belle', 'Lola'),
('Leon and Lisa', 'Kids-B'),
('Lisa and Leon', 'Kids-B'),
))
# Pass 3: map court-filing party roles onto the same anonymized labels.
s_map.update((
('The Defendant', 'Dad'),
('the Defendant', 'Dad'),
('Defendant', 'Dad'),
('The Plaintiff', 'Mom-B'),
('the Plaintiff', 'Mom-B'),
('Plaintiff', 'Mom-B'),
('The children', 'children'),
('the children', 'children'),
('Children', 'Kids-B'),
('children', 'Kids-B'),
('Katie L. Lenihan, Esquire', 'Atty Lenihan'),
('Honorable Court', 'Court-B'),
('Sandy Mahoney', 'Ms Mahoney'),
))
# Pass 4: collapse doubled labels that the earlier passes can produce when
# adjacent name variants were both replaced.
s_map.update((
('Dad, Dad', 'Dad'),
('Dad Dad', 'Dad'),
('Mom-B, Mom-B', 'Mom-B'),
('Mom-B Mom-B', 'Mom-B'),
('Mom-C, Mom-C', 'Mom-C'),
('Mom-C Mom-C', 'Mom-C'),
('Kids-B, Kids-B', 'Kids-B'),
('Kids-B Kids-B', 'Kids-B'),
))
# Pass 5: whitespace and punctuation normalization.
# NOTE(review): the two space-to-space entries below read as identical here;
# presumably the originals collapsed runs of multiple spaces into one and the
# extra spaces were lost in transcription - confirm against upstream.
s_map.update((
('\r', '\n'),
('\t', ' '),
(':', ' '),
(';', ','),
(' ', ' '),
(' ', ' '),
(' \n', '\n'),
# ('?', '?.'),
# ('!', '!.'),
('..', '.'),
('.\n', '\n'),
(',\n', '\n'),
('. ', '\n'),
))
def rectify(txt):
    """Normalize one line of text.

    The date-rewriting passes (``days``/``months``) and the ``s_map``
    substitutions are currently disabled; only surrounding whitespace is
    removed.
    """
    return txt.strip()


def rectifier(txt):
    """Yield every line of *txt* run through :func:`rectify`."""
    yield from map(rectify, txt.splitlines())
if __name__ == '__main__':
    # Smoke-check: only the last two samples contain well-formed dates, but
    # with the substitution passes disabled all three are merely stripped.
    for sample in (
        'dsfaf sc casdf Febru 25, 2011, dfwec asef ef',
        'dsfaf sc casdf February 25, 2011, dfwec asef ef',
        'dsfaf February 25, 2011, dfwec June 1, 2009 ef',
    ):
        print(rectify(sample))
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,543
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/tokens/fast/pegasus.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
from shutil import copyfile
from ....tokens.fast import PreTrainedTokenizerFast
from ..pegasus import Tokenizer as Pegasus
# Marker SentencePiece prepends to word-initial pieces.
SPIECE_UNDERLINE = "▁"
# On-disk file names for the slow (SentencePiece) and fast (tokenizers)
# vocabulary artifacts.
VOCAB_FS = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
# Download locations of the pretrained vocabulary files, keyed by model id.
VOCAB_MAP = {
"vocab_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"
},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
# Maximum model input length in tokens, keyed by model id.
INPUT_CAPS = {
"google/pegasus-xsum": 512,
}
class Tokenizer(PreTrainedTokenizerFast):
vocab_fs = VOCAB_FS
vocab_map = VOCAB_MAP
input_caps = INPUT_CAPS
slow_tokenizer_class = Pegasus
model_input_names = ["input_ids", "mask"]
def __init__(
self,
vocab_file=None,
tokenizer_file=None,
pad="<pad>",
eos="</s>",
unk="<unk>",
msk="<mask_2>",
mask_token_sent="<mask_1>",
additional_special_tokens=None,
offset=103,
**kw,
):
self.offset = offset
if additional_special_tokens is not None:
assert isinstance(additional_special_tokens, list)
additional_special_tokens_extended = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
additional_special_tokens_extended += [
f"<unk_{i}>"
for i in range(len(additional_special_tokens_extended), self.offset - 1)
]
if len(set(additional_special_tokens_extended)) != len(
additional_special_tokens_extended
):
raise ValueError(
f"Please make sure that the provided additional_special_tokens do not contain an incorrectly shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
)
additional_special_tokens = additional_special_tokens_extended
else:
additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
pad=pad,
eos=eos,
unk=unk,
msk=msk,
mask_token_sent=mask_token_sent,
offset=offset,
additional_special_tokens=additional_special_tokens,
**kw,
)
self.vocab_file = vocab_file
self.can_save_slow_tokenizer = False if not self.vocab_file else True
def _special_token_mask(self, seq):
all_special_ids = set(self.all_special_ids)
all_special_ids.remove(self.unk_token_id)
assert all_special_ids == set(range(len(self.additional_special_tokens) + 3))
return [1 if x in all_special_ids else 0 for x in seq]
def get_special_tokens_mask(
self,
toks_0,
toks_1=None,
has_specials=False,
):
if has_specials:
return self._special_token_mask(toks_0)
elif toks_1 is None:
return self._special_token_mask(toks_0) + [1]
else:
return self._special_token_mask(toks_0 + toks_1) + [1]
def build_inputs_with_special_tokens(self, toks_0, toks_1=None):
if toks_1 is None:
return toks_0 + [self.EOS]
return toks_0 + toks_1 + [self.EOS]
def save_vocabulary(self, dir, pre=None):
    """Copy the slow-tokenizer vocab file into `dir`.

    The target file name comes from VOCAB_FS, optionally prefixed with
    `pre` + "-". Returns a 1-tuple with the written path. Requires that a
    slow-tokenizer vocab file is available (`can_save_slow_tokenizer`).
    """
    assert self.can_save_slow_tokenizer
    fname = VOCAB_FS["vocab_file"]
    if pre:
        fname = pre + "-" + fname
    target = os.path.join(dir, fname)
    # Skip the copy when source and destination are the same file.
    if os.path.abspath(self.vocab_file) != os.path.abspath(target):
        copyfile(self.vocab_file, target)
    return (target,)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,544
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/triton/common/build.py
|
import contextlib
import functools
import io
import os
import shutil
import subprocess
import sys
import sysconfig
import setuptools
# TODO: is_hip shouldn't be here
def is_hip():
    """Return True when the installed torch is a ROCm/HIP build."""
    import torch

    hip_version = torch.version.hip
    return hip_version is not None
@functools.lru_cache()
def libcuda_dirs():
    """Directories that `whereis` reports as holding libcuda.so (cached).

    `whereis` output looks like "libcuda.so: /path/a/libcuda.so ..."; the
    leading label is dropped and each hit reduced to its directory.
    """
    out = subprocess.check_output(["whereis", "libcuda.so"]).decode()
    locations = out.strip().split()[1:]
    return [os.path.dirname(path) for path in locations]
@functools.lru_cache()
def rocm_path_dir():
    """ROCm install root; honors $ROCM_PATH, defaults to /opt/rocm (cached)."""
    return os.environ.get("ROCM_PATH", "/opt/rocm")
@contextlib.contextmanager
def quiet():
    """Silence stdout and stderr for the duration of the with-block.

    Both streams are swapped for throwaway StringIO buffers and restored
    on exit even if the body raises.
    """
    saved_out, saved_err = sys.stdout, sys.stderr
    sys.stdout = io.StringIO()
    sys.stderr = io.StringIO()
    try:
        yield
    finally:
        sys.stdout, sys.stderr = saved_out, saved_err
def _build(name, src, srcdir):
    """Compile C source `src` into a Python extension module named `name`.

    The shared object is written into `srcdir` and its path returned.
    A direct cc/gcc/clang invocation is attempted first; a setuptools-based
    build is kept as a fallback (see the NOTE below on its reachability).
    """
    # Pick include/library directories for the active GPU backend.
    if is_hip():
        hip_lib_dir = os.path.join(rocm_path_dir(), "lib")
        hip_include_dir = os.path.join(rocm_path_dir(), "include")
    else:
        cuda_lib_dirs = libcuda_dirs()
        base_dir = os.path.join(os.path.dirname(__file__), os.path.pardir)
        cuda_path = os.path.join(base_dir, "third_party", "cuda")
        cu_include_dir = os.path.join(cuda_path, "include")
        triton_include_dir = os.path.join(os.path.dirname(__file__), "include")
        cuda_header = os.path.join(cu_include_dir, "cuda.h")
        triton_cuda_header = os.path.join(triton_include_dir, "cuda.h")
        # Fall back to the bundled cuda.h only when the third_party copy is absent.
        if not os.path.exists(cuda_header) and os.path.exists(triton_cuda_header):
            cu_include_dir = triton_include_dir
    # Platform extension suffix, e.g. ".cpython-311-x86_64-linux-gnu.so".
    suffix = sysconfig.get_config_var('EXT_SUFFIX')
    so = os.path.join(srcdir, '{name}{suffix}'.format(name=name, suffix=suffix))
    # try to avoid setuptools if possible
    cc = os.environ.get("CC")
    if cc is None:
        # TODO: support more things here.
        clang = shutil.which("clang")
        gcc = shutil.which("gcc")
        # Prefer gcc; clang is only used when gcc is missing.
        cc = gcc if gcc is not None else clang
        if cc is None:
            raise RuntimeError("Failed to find C compiler. Please specify via CC environment variable.")
    # This function was renamed and made public in Python 3.10
    if hasattr(sysconfig, 'get_default_scheme'):
        scheme = sysconfig.get_default_scheme()
    else:
        scheme = sysconfig._get_default_scheme()
    # 'posix_local' is a custom scheme on Debian. However, starting Python 3.10, the default install
    # path changes to include 'local'. This change is required to use triton with system-wide python.
    if scheme == 'posix_local':
        scheme = 'posix_prefix'
    py_include_dir = sysconfig.get_paths(scheme=scheme)["include"]
    # Direct compiler invocation; check_call raises CalledProcessError on failure.
    if is_hip():
        ret = subprocess.check_call([cc, src, f"-I{hip_include_dir}", f"-I{py_include_dir}", f"-I{srcdir}", "-shared", "-fPIC", f"-L{hip_lib_dir}", "-lamdhip64", "-o", so])
    else:
        cc_cmd = [cc, src, "-O3", f"-I{cu_include_dir}", f"-I{py_include_dir}", f"-I{srcdir}", "-shared", "-fPIC", "-lcuda", "-o", so]
        cc_cmd += [f"-L{dir}" for dir in cuda_lib_dirs]
        ret = subprocess.check_call(cc_cmd)
    # NOTE(review): subprocess.check_call raises on a nonzero exit status, so
    # `ret` is always 0 here and the setuptools fallback below appears
    # unreachable — confirm before relying on it. On the HIP path the fallback
    # would also NameError on `cuda_lib_dirs`/`cu_include_dir`.
    if ret == 0:
        return so
    # fallback on setuptools
    extra_compile_args = []
    library_dirs = cuda_lib_dirs
    include_dirs = [srcdir, cu_include_dir]
    libraries = ['cuda']
    # extra arguments
    extra_link_args = []
    # create extension module
    ext = setuptools.Extension(
        name=name,
        language='c',
        sources=[src],
        include_dirs=include_dirs,
        extra_compile_args=extra_compile_args + ['-O3'],
        extra_link_args=extra_link_args,
        library_dirs=library_dirs,
        libraries=libraries,
    )
    # build extension module
    args = ['build_ext']
    args.append('--build-temp=' + srcdir)
    args.append('--build-lib=' + srcdir)
    args.append('-q')
    args = dict(
        name=name,
        ext_modules=[ext],
        script_args=args,
    )
    # Run the setuptools build with stdout/stderr suppressed via quiet().
    with quiet():
        setuptools.setup(**args)
    return so
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,545
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/test/unit/language/test_annotations.py
|
from __future__ import annotations
import torch
import triton
import triton.language as tl
def test_annotations():
    """Smoke-test that ``@triton.jit`` handles annotated kernel parameters.

    Compiles a trivial kernel whose parameters carry ``torch.Tensor``,
    ``int``, and ``tl.constexpr`` annotations, launches it once with valid
    arguments, then checks that passing a plain int in the tensor slot
    fails at launch with an ``AttributeError``.
    """
    @triton.jit
    def _annotated(X: torch.Tensor, N: int, BLOCK_SIZE: tl.constexpr):
        pass

    buf = torch.empty(1, device='cuda')
    n = buf.shape[0]

    # Valid launch: tensor, int, and constexpr arguments line up.
    _annotated[(1,)](buf, n, 32)

    try:
        # Invalid launch: an int where a tensor is annotated should be
        # rejected by the JIT argument handling.
        _annotated[(1,)](n, n, 32)
    except AttributeError:
        pass
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,546
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/config/t5.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import torch
from ... import core as qc
class PreTrained(qc.PreTrained):
    """Shared configuration base for T5 models.

    Declares the T5 hyper-parameter schema (`hs`), the weight-initialization
    scheme, gradient-checkpoint toggling, and the right-shift used to build
    decoder inputs from labels.
    """

    hs = qc.Hypers(
        {"n_dec_lays"},
        dict(
            d_ff=2048,
            d_kv=64,
            d_model=512,
            drop_rate=0.1,
            EOS=1,
            eps=1e-6,
            feed_forward_proj="relu",
            grad_checkpoint=True,
            init_factor=1.0,
            is_enc_dec=True,
            is_parallelizable=True,
            model_type="t5",
            n_heads=8,
            n_lays=6,
            PAD=0,
            relative_attention_num_buckets=32,
            s_vocab=32128,
            y_cache=True,
        ),
    )

    def __init__(self, n_dec_lays=None, PAD=0, EOS=1, is_enc_dec=True, **kw):
        # BUG FIX: the previous signature took only **kw yet read
        # `n_dec_lays`, `PAD`, `EOS` and `is_enc_dec` as bare names, raising
        # NameError on every call.  They are now explicit keyword parameters
        # whose defaults match the hyper defaults above.
        super().__init__(PAD=PAD, EOS=EOS, is_enc_dec=is_enc_dec, **kw)
        # Decoder depth falls back to the encoder depth when not given.
        # NOTE(review): `self.n_lays` is presumably populated by the base
        # __init__ from `hs` — confirm against qc.PreTrained.
        self.n_dec_lays = n_dec_lays if n_dec_lays is not None else self.n_lays

    @property
    def dummy_inputs(self):
        """Minimal batch used for tracing / smoke-testing the model."""
        input_ids = torch.tensor(DUMMY_INPUTS)
        input_mask = torch.tensor(DUMMY_MASK)
        dummy_inputs = {
            "decoder_input_ids": input_ids,
            "input_ids": input_ids,
            "dec_m": input_mask,
        }
        return dummy_inputs

    def _init_weights(self, module):
        """Initialize `module`'s weights following the T5 scheme (fan-in scaled normals)."""
        # CONSISTENCY FIX: the hyper is declared `init_factor` above; the old
        # code read the nonexistent `initializer_factor` attribute.
        factor = self.cfg.init_factor  # used for testing weights initialization
        if isinstance(module, LayerNorm):
            module.weight.data.fill_(factor * 1.0)
        elif isinstance(module, (Model, ForConditionalGeneration, EncoderModel)):
            module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
        elif isinstance(module, DenseReluDense):
            # Feed-forward projections: std scales with fan-in ** -0.5.
            module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.cfg.d_model) ** -0.5))
            if hasattr(module.wi, "bias") and module.wi.bias is not None:
                module.wi.bias.data.zero_()
            module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.cfg.d_ff) ** -0.5))
            if hasattr(module.wo, "bias") and module.wo.bias is not None:
                module.wo.bias.data.zero_()
        elif isinstance(module, DenseGatedGeluDense):
            module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.cfg.d_model) ** -0.5))
            if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
                module.wi_0.bias.data.zero_()
            module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.cfg.d_model) ** -0.5))
            if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
                module.wi_1.bias.data.zero_()
            module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.cfg.d_ff) ** -0.5))
            if hasattr(module.wo, "bias") and module.wo.bias is not None:
                module.wo.bias.data.zero_()
        elif isinstance(module, Attention):
            d_model = self.cfg.d_model
            key_value_proj_dim = self.cfg.d_kv
            n_heads = self.cfg.n_heads
            module.q.weight.data.normal_(
                mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)
            )
            module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
            module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
            module.o.weight.data.normal_(
                mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)
            )
            if module.has_relative_attention_bias:
                module.relative_attention_bias.weight.data.normal_(
                    mean=0.0, std=factor * ((d_model) ** -0.5)
                )

    def _set_grad_checkpoint(self, module, value=False):
        """Toggle gradient checkpointing on the checkpointable submodules."""
        if isinstance(module, (Attention, Stack)):
            module.grad_checkpoint = value

    def _shift_right(self, input_ids):
        """Shift `input_ids` one position right, prepending `dec_START`.

        Used to derive decoder inputs from labels; -100 label sentinels are
        replaced with PAD.
        """
        dec_START = self.cfg.dec_START
        PAD = self.cfg.PAD
        assert (
            dec_START is not None
        ), "self.model.config.dec_START has to be defined. It is usually set to the PAD. See docs for more information"
        if is_torch_fx_proxy(input_ids):
            # torch.fx proxies cannot be mutated item-wise; build by concatenation.
            shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), dec_START)
            shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
        else:
            shifted_input_ids = input_ids.new_zeros(input_ids.shape)
            shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
            shifted_input_ids[..., 0] = dec_START
            assert PAD is not None, "self.model.config.PAD has to be defined."
            # Replace label-masking sentinels (-100) with PAD before decoding.
            shifted_input_ids.masked_fill_(shifted_input_ids == -100, PAD)
            assert torch.all(
                shifted_input_ids >= 0
            ).item(), "Verify that `shifted_input_ids` has only positive values"
        return shifted_input_ids
def _task_params():
    """Return a fresh per-task generation config shared by all T5 sizes.

    Every checkpoint entry in MAP uses the same summarization / translation
    settings; building them here removes a five-fold duplication.  A new dict
    tree is returned on each call so entries never share mutable state.
    """
    return dict(
        summarization=dict(
            early_stop=True,
            len_penalty=2.0,
            max_len=200,
            min_len=30,
            s_no_repeat_ngram=3,
            n_beams=4,
            prefix="summarize: ",
        ),
        translation_en_to_de=dict(
            early_stop=True,
            max_len=300,
            n_beams=4,
            prefix="translate English to German: ",
        ),
        translation_en_to_ro=dict(
            early_stop=True,
            max_len=300,
            n_beams=4,
            prefix="translate English to Romanian: ",
        ),
    )


# Per-checkpoint hyper overrides; keys are the published T5 model names.
MAP = {
    "t5-small": dict(
        archs=["LMHead"],
        dec_START=0,
        n_pos=512,
        eps=1e-06,
        y_prev=True,
        task_params=_task_params(),
    ),
    "t5-base": dict(
        archs=["LMHead"],
        d_ff=3072,
        d_model=768,
        dec_START=0,
        n_heads=12,
        n_lays=12,
        n_pos=512,
        eps=1e-06,
        y_prev=True,
        task_params=_task_params(),
    ),
    "t5-large": dict(
        archs=["LMHead"],
        d_ff=4096,
        d_model=1024,
        dec_START=0,
        n_heads=16,
        n_lays=24,
        n_pos=512,
        eps=1e-06,
        y_prev=True,
        task_params=_task_params(),
    ),
    "t5-3b": dict(
        archs=["LMHead"],
        d_ff=16384,
        d_model=1024,
        d_kv=128,
        dec_START=0,
        n_heads=32,
        n_lays=24,
        n_pos=512,
        eps=1e-06,
        y_prev=True,
        task_params=_task_params(),
    ),
    "t5-11b": dict(
        archs=["LMHead"],
        d_ff=65536,
        d_model=1024,
        d_kv=128,
        dec_START=0,
        n_heads=128,
        n_lays=24,
        n_pos=512,
        eps=1e-06,
        y_prev=True,
        task_params=_task_params(),
    ),
}
class Onnx:
    """ONNX export metadata for T5.

    Describes the dynamic axes of the export inputs.  NOTE(review): relies on
    `self.use_past` and `self.fill_with_past_key_values_` being supplied by a
    subclass / mixin — confirm against the ONNX config base class.
    """

    @property
    def inputs(self):
        """Map each export input name to its dynamic-axis labels."""
        encoder_axes = {0: "batch", 1: "encoder_sequence"}
        mapping = {
            "input_ids": dict(encoder_axes),
            "mask": dict(encoder_axes),
        }
        if not self.use_past:
            mapping["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            mapping["dec_m"] = {0: "batch", 1: "decoder_sequence"}
        else:
            # With cached past key/values the mask covers past + current tokens
            # and the decoder only receives the newest token per step.
            mapping["mask"][1] = "past_encoder_sequence + sequence"
            mapping["decoder_input_ids"] = {0: "batch"}
            mapping["dec_m"] = {
                0: "batch",
                1: "past_decoder_sequence + sequence",
            }
            self.fill_with_past_key_values_(mapping, direction="inputs")
        return mapping

    @property
    def default_onnx_opset(self):
        """Lowest ONNX opset version the export targets."""
        return 13
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,547
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/base/judgment.py
|
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from .claim import Claim
from .narrative import Node
from .author import Authority
from .conflict import Conflict
from .conjecture import Dissent
class Judgment(Node):
    """A node that aggregates claims, conflicts and dissents under one authority.

    Subclasses (Validation, Confusion, Bias, Disregard, Fabrication) grade the
    judgment by setting ``sign`` and ``_factor``. ``partial`` and ``bias`` are
    inherited from Node (not visible in this chunk) — presumably partial folds
    the weight tuples using this judgment's factor; TODO confirm against Node.
    """
    # Class-level placeholders; real per-instance lists are created in __init__.
    claims = conflicts = dissents = None
    def __init__(self,
                 text=None,
                 conflicts=None,
                 dissents=None,
                 authority=None,
                 **kw):
        """Build a judgment from optional claim text and '|'-separated specs.

        text: free text wrapped into a Claim attached to this judgment.
        conflicts, dissents: '|'-separated specs; entries lacking a ':' are
            silently skipped, the rest are passed as names to .create().
        authority: name used to create/look up the issuing Authority.
        kw: forwarded to Node.__init__ and (minus judgment-only keys) to Claim.
        """
        super().__init__(**kw)
        if self.claims is None:
            # Replace the shared class-level None with fresh per-instance lists.
            self.claims, self.conflicts, self.dissents = [], [], []
        if text:
            # These keys configure the Judgment itself, not the wrapped Claim.
            for k in ('factor', 'bias', 'weight'):
                kw.pop(k, None)
            self.claims.append(Claim(text=text, **kw))
        if conflicts:
            fs = (f.strip() for f in conflicts.split('|') if ':' in f)
            self.conflicts.extend(Conflict.create(name=f) for f in fs if f)
        if dissents:
            ds = (d.strip() for d in dissents.split('|') if ':' in d)
            self.dissents.extend(Dissent.create(name=d) for d in ds if d)
        if authority:
            self.authority = Authority.create(name=authority)
    @property
    def weight(self):
        """Combined weight of claims, conflicts and dissents plus the bias."""
        cs = tuple(c.weight for c in self.claims)
        fs = tuple(f.weight for f in self.conflicts)
        ds = tuple(d.weight for d in self.dissents)
        return self.partial(cs, fs, ds) + self.bias
    @property
    def turmoil(self):
        # Alias: a judgment's turmoil is simply its aggregate weight.
        return self.weight
    @property
    def value(self):
        """Display string: the base value, the authority's agency and turmoil."""
        t = self.turmoil
        return '{} {}: T{}'.format(super().value, self.authority.agency, t)
    @property
    def fields(self):
        """Flatten this judgment into a list of per-item field dicts.

        Emits one dict per claim (merged with the judgment's own fields),
        then one per conflict and one per dissent (each group sorted by
        ``sequence``), every entry tagged with a 'Turmoil' value derived
        from that item's individual weight.
        """
        fs = super().fields
        fs['Judgment'] = self.name
        ls = []
        for c in self.claims:
            fs2 = c.fields
            # Judgment-level fields override/extend the claim's own fields.
            fs2.update(fs)
            fs2['Turmoil'] = self.partial(c.weight)
            ls.append(fs2)
        for f in sorted(self.conflicts, key=lambda f: f.sequence):
            fs2 = f.fields
            # Conflicts only inherit the identifying context fields.
            fs2['Topic'] = fs['Topic']
            fs2['Narrative'] = fs['Narrative']
            fs2['Judgment'] = fs['Judgment']
            fs2['Turmoil'] = self.partial(f.weight)
            ls.append(fs2)
        for d in sorted(self.dissents, key=lambda d: d.sequence):
            fs2 = d.fields
            # Dissents mirror the conflict handling above.
            fs2['Topic'] = fs['Topic']
            fs2['Narrative'] = fs['Narrative']
            fs2['Judgment'] = fs['Judgment']
            fs2['Turmoil'] = self.partial(d.weight)
            ls.append(fs2)
        return ls
class Validation(Judgment):
    # Lowest-severity grade: the claim is validated; zero turmoil factor.
    sign = '=v'
    _factor = 0
class Confusion(Judgment):
    # Mild grade: the claim is confused; quarter turmoil factor.
    sign = '=c'
    _factor = 0.25
class Bias(Judgment):
    # Middle grade: the claim is biased; half turmoil factor.
    sign = '=b'
    _factor = 0.5
class Disregard(Judgment):
    # High grade: the claim disregards evidence; three-quarter turmoil factor.
    sign = '=g'
    _factor = 0.75
class Fabrication(Judgment):
sign = '=f'
_factor = 1
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,548
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/models/splinter.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import math
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import functional as F
from transformers.utils import logging
from .. import core as qc
from ..core import utils as qu
from ..core import output as qo
from ..core import attention as qa
from ..core.embed import Embed
from ..core.mlp import Classifier, MLP, Predictor, Pool
from ..prep.config.bert import PreTrained
# Module-level logger, named after this module.
log = logging.get_logger(__name__)

# Published Splinter checkpoints this implementation corresponds to.
LIST = [
    "tau/splinter-base",
    "tau/splinter-base-qass",
    "tau/splinter-large",
    "tau/splinter-large-qass",
]
class SplinterEmbeddings(qc.Module):
    """Input embeddings for Splinter.

    Sums word, token-type and (for the "absolute" position scheme) position
    embeddings, then applies LayerNorm and dropout.
    """

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = qc.Embed(config.s_vocab, config.d_model, padding_idx=config.PAD)
        self.position_embeddings = qc.Embed(config.n_pos, config.d_model)
        self.token_type_embeddings = qc.Embed(config.n_typ, config.d_model)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)
        # Persistent [1, n_pos] index buffer used to default position_ids.
        self.register_buffer("position_ids", torch.arange(config.n_pos).expand((1, -1)))
        self.pos_type = getattr(config, "pos_type", "absolute")

    def forward(
        self,
        input_ids=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        past_key_values_length=0,
    ):
        # Either input_ids or inputs_embeds must be supplied by the caller.
        shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1]
        n = shape[1]
        if position_ids is None:
            # Offset by the cached-key length so positions continue a prefix.
            position_ids = self.position_ids[:, past_key_values_length : n + past_key_values_length]
        if token_type_ids is None:
            token_type_ids = torch.zeros(shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        y = inputs_embeds + self.token_type_embeddings(token_type_ids)
        if self.pos_type == "absolute":
            y = y + self.position_embeddings(position_ids)
        return self.drop(self.norm(y))
class SplinterSelfAttention(qc.Module):
    """Multi-head (self or cross) attention with optional relative-position
    scoring and decoder key/value caching."""

    def __init__(self, config, pos_type=None):
        super().__init__()
        if config.d_model % config.n_heads != 0 and not hasattr(config, "d_embed"):
            raise ValueError(
                f"The hidden size ({config.d_model}) is not a multiple of the number of attention "
                f"heads ({config.n_heads})"
            )
        self.n_heads = config.n_heads
        self.attention_head_size = int(config.d_model / config.n_heads)
        self.all_head_size = self.n_heads * self.attention_head_size
        self.query = qc.Linear(config.d_model, self.all_head_size)
        self.key = qc.Linear(config.d_model, self.all_head_size)
        self.value = qc.Linear(config.d_model, self.all_head_size)
        self.drop = qc.Dropout(config.drop_attn)
        self.pos_type = pos_type or getattr(config, "pos_type", "absolute")
        if self.pos_type == "relative_key" or self.pos_type == "relative_key_query":
            # Embedding over all possible signed distances in [-(n_pos-1), n_pos-1].
            self.n_pos = config.n_pos
            self.distance_embedding = qc.Embed(2 * config.n_pos - 1, self.attention_head_size)
        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x):
        # [batch, seq, all_head] -> [batch, heads, seq, head_size]
        new_x_shape = x.size()[:-1] + (self.n_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(hiddens)
        # Cross-attention is signaled by the presence of encoder states.
        is_cross_attention = enc_hiddens is not None
        if is_cross_attention and past_key_value is not None:
            # Cross-attention K/V are static per encoder output: reuse cache.
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(enc_hiddens))
            value_layer = self.transpose_for_scores(self.value(enc_hiddens))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            # Incremental decoding: append new K/V to the cached prefix.
            key_layer = self.transpose_for_scores(self.key(hiddens))
            value_layer = self.transpose_for_scores(self.value(hiddens))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hiddens))
            value_layer = self.transpose_for_scores(self.value(hiddens))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        if self.is_decoder:
            # Expose (K, V) for the caller to cache for the next step.
            past_key_value = (key_layer, value_layer)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.pos_type == "relative_key" or self.pos_type == "relative_key_query":
            # Relative-position scoring: embed pairwise token distances and
            # mix them into the raw scores via einsum.
            seq_length = hiddens.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hiddens.device).view(
                -1, 1
            )
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hiddens.device).view(
                1, -1
            )
            distance = position_ids_l - position_ids_r
            # Shift distances to non-negative indices for the embedding table.
            positional_embedding = self.distance_embedding(distance + self.n_pos - 1)
            positional_embedding = positional_embedding.to(
                dtype=query_layer.dtype
            )  # fp16 compatibility
            if self.pos_type == "relative_key":
                relative_position_scores = torch.einsum(
                    "bhld,lrd->bhlr", query_layer, positional_embedding
                )
                attention_scores = attention_scores + relative_position_scores
            elif self.pos_type == "relative_key_query":
                relative_position_scores_query = torch.einsum(
                    "bhld,lrd->bhlr", query_layer, positional_embedding
                )
                relative_position_scores_key = torch.einsum(
                    "bhrd,lrd->bhlr", key_layer, positional_embedding
                )
                attention_scores = (
                    attention_scores + relative_position_scores_query + relative_position_scores_key
                )
        # Scaled dot-product; additive mask is expected to hold -inf at
        # masked positions (standard transformer convention -- confirm in
        # the caller that builds the mask).
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            attention_scores = attention_scores + attention_mask
        attention_probs = F.softmax(attention_scores, dim=-1)
        # Dropout on probabilities (as in the original Transformer paper).
        attention_probs = self.drop(attention_probs)
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        # [batch, heads, seq, head_size] -> [batch, seq, all_head]
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
class SplinterSelfOutput(qc.Module):
    """Projects attention context back to d_model, then applies dropout and
    a residual LayerNorm against the block's input."""

    def __init__(self, config):
        super().__init__()
        self.dense = qc.Linear(config.d_model, config.d_model)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)

    def forward(self, hiddens, input_tensor):
        y = self.drop(self.dense(hiddens))
        return self.norm(y + input_tensor)
class Attention(qc.Module):
    """Self-attention followed by its output projection / residual block."""

    def __init__(self, config, pos_type=None):
        super().__init__()
        self.self = SplinterSelfAttention(config, pos_type=pos_type)
        self.output = SplinterSelfOutput(config)

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        ys = self.self(
            hiddens,
            attention_mask,
            head_mask,
            enc_hiddens,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        # ys[0] is the context; the tail carries attn probs and/or the cache.
        return (self.output(ys[0], hiddens),) + ys[1:]
class SplinterIntermediate(qc.Module):
    """Feed-forward expansion: d_model -> d_ff followed by the configured
    activation."""

    def __init__(self, cfg):
        super().__init__()
        self.dense = qc.Linear(cfg.d_model, cfg.d_ff)
        self.act = qu.activation(cfg.act)

    def forward(self, x):
        return self.act(self.dense(x))
class SplinterOutput(qc.Module):
    """Feed-forward contraction: d_ff -> d_model, then dropout and a
    residual LayerNorm against the block's input."""

    def __init__(self, config):
        super().__init__()
        self.dense = qc.Linear(config.d_ff, config.d_model)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)

    def forward(self, y, input_tensor):
        return self.norm(self.drop(self.dense(y)) + input_tensor)
class Layer(qc.Module):
    """One transformer layer: self-attention, optional cross-attention
    (decoder only), and a chunked feed-forward block.

    Fix: the original called `apply_chunking_to_forward`, a HuggingFace
    helper that is never imported in this file, so any forward pass raised
    NameError. Replaced with the equivalent local `_chunked_feed_forward`,
    which splits along the sequence dim when `chunk_size_feed_forward > 0`
    (the feed-forward is position-wise, so chunking is exact).
    """

    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = Attention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(
                    f"{self} should be used as a decoder model if cross attention is added"
                )
            # Cross-attention always uses absolute positions.
            self.crossattention = Attention(config, pos_type="absolute")
        self.intermediate = SplinterIntermediate(config)
        self.output = SplinterOutput(config)

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # Cached tuple layout: (self K, self V, cross K, cross V).
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hiddens,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]
        if self.is_decoder:
            # Last element is the present key/value cache.
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]
        cross_attn_present_key_value = None
        if self.is_decoder and enc_hiddens is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `enc_hiddens` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
                )
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                enc_hiddens,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value
        layer_output = self._chunked_feed_forward(attention_output)
        outputs = (layer_output,) + outputs
        if self.is_decoder:
            outputs = outputs + (present_key_value,)
        return outputs

    def _chunked_feed_forward(self, attention_output):
        # Memory-saving trade: run the position-wise FFN over sequence
        # chunks of `chunk_size_feed_forward` tokens, then reassemble.
        n = self.chunk_size_feed_forward
        if n > 0 and attention_output.shape[self.seq_len_dim] > 0:
            chunks = attention_output.split(n, dim=self.seq_len_dim)
            return torch.cat(
                [self.feed_forward_chunk(c) for c in chunks], dim=self.seq_len_dim
            )
        return self.feed_forward_chunk(attention_output)

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class Encoder(qc.Module):
    """Stack of `config.n_lays` transformer layers with optional gradient
    checkpointing, hidden-state/attention collection and K/V caching."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([Layer(config) for _ in range(config.n_lays)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hiddens,
        attention_mask=None,
        head_mask=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        caches=None,
        y_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        # Accumulators are None when the corresponding output is disabled.
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        next_decoder_cache = () if y_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # Record the state *entering* each layer.
                all_hidden_states = all_hidden_states + (hiddens,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = caches[i] if caches is not None else None
            if self.gradient_checkpointing and self.training:
                # Checkpointing recomputes activations, which is incompatible
                # with returning a usable K/V cache.
                if y_cache:
                    log.warning(
                        "`y_cache=True` is incompatible with gradient checkpointing. Setting `y_cache=False`..."
                    )
                    y_cache = False

                def create_custom_forward(module):
                    # Bind the per-layer constants so checkpoint() only sees
                    # tensor arguments.
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hiddens,
                    attention_mask,
                    layer_head_mask,
                    enc_hiddens,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hiddens,
                    attention_mask,
                    layer_head_mask,
                    enc_hiddens,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            hiddens = layer_outputs[0]
            if y_cache:
                # Layer appends its present K/V as the last output element.
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
        if output_hidden_states:
            # Final state after the last layer.
            all_hidden_states = all_hidden_states + (hiddens,)
        if not return_dict:
            # Tuple form: drop the disabled (None) entries.
            return tuple(
                v
                for v in [
                    hiddens,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return qo.CachesCrosses(
            y=hiddens,
            caches=next_decoder_cache,
            hiddens=all_hidden_states,
            attns=all_self_attentions,
            crosses=all_cross_attentions,
        )
class Model(PreTrained):
    """Embeddings + Encoder backbone.

    Supports decoder mode (cross-attention over `enc_hiddens`, incremental
    decoding via `caches`/`y_cache`) when `config.is_decoder` is set.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embeddings = SplinterEmbeddings(config)
        self.encoder = Encoder(config)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        caches=None,
        y_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        cfg = self.config
        # Fall back to config-level defaults for unset output flags.
        if output_attentions is None:
            output_attentions = cfg.output_attentions
        if output_hidden_states is None:
            output_hidden_states = cfg.output_hidden_states
        if return_dict is None:
            return_dict = cfg.use_return_dict
        # Caching is only meaningful in decoder mode.
        if not cfg.is_decoder:
            y_cache = False
        elif y_cache is None:
            y_cache = cfg.y_cache
        # Exactly one of input_ids / inputs_embeds must be supplied.
        if input_ids is not None:
            if inputs_embeds is not None:
                raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
            shape = input_ids.size()
            device = input_ids.device
        elif inputs_embeds is not None:
            shape = inputs_embeds.size()[:-1]
            device = inputs_embeds.device
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        n_batch, n_seq = shape
        # Length of previously cached keys/values (0 on the first step).
        past_len = caches[0][0].shape[2] if caches is not None else 0
        if attention_mask is None:
            attention_mask = torch.ones((n_batch, n_seq + past_len), device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(shape, dtype=torch.long, device=device)
        mask = self.get_extended_attention_mask(attention_mask, shape, device)
        # Inverted cross-attention mask, only when decoding over encoder states.
        enc_mask = None
        if cfg.is_decoder and enc_hiddens is not None:
            if encoder_attention_mask is None:
                enc_batch, enc_seq, _ = enc_hiddens.size()
                encoder_attention_mask = torch.ones((enc_batch, enc_seq), device=device)
            enc_mask = self.invert_attention_mask(encoder_attention_mask)
        head_mask = self.get_head_mask(head_mask, cfg.n_lays)
        embeds = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_len,
        )
        ys = self.encoder(
            embeds,
            attention_mask=mask,
            head_mask=head_mask,
            enc_hiddens=enc_hiddens,
            encoder_attention_mask=enc_mask,
            caches=caches,
            y_cache=y_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        seq = ys[0]
        if not return_dict:
            return (seq,) + ys[1:]
        return qo.CachesCrosses(
            y=seq,
            caches=ys.caches,
            hiddens=ys.hiddens,
            attns=ys.attns,
            crosses=ys.crosses,
        )
class SplinterFullyConnectedLayer(qc.Module):
    """Dense projection followed by an activation and LayerNorm."""

    def __init__(self, input_dim, output_dim, act="gelu"):
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.dense = qc.Linear(self.input_dim, self.output_dim)
        self.act = qu.activation(act)
        self.norm = qc.LayerNorm(self.output_dim)

    def forward(self, inputs):
        """Return LayerNorm(act(dense(inputs)))."""
        activated = self.act(self.dense(inputs))
        return self.norm(activated)
class QuestionAwareSpanSelectionHead(qc.Module):
    """Splinter-style QASS head.

    Scores every token position as a span start/end by matching
    question-conditioned query vectors (gathered at `positions`) against
    token-level start/end representations.
    """

    def __init__(self, config):
        super().__init__()
        d = config.d_model
        self.query_start_transform = SplinterFullyConnectedLayer(d, d)
        self.query_end_transform = SplinterFullyConnectedLayer(d, d)
        self.start_transform = SplinterFullyConnectedLayer(d, d)
        self.end_transform = SplinterFullyConnectedLayer(d, d)
        self.start_classifier = qc.Linear(d, d, bias=False)
        self.end_classifier = qc.Linear(d, d, bias=False)

    def forward(self, inputs, positions):
        _, _, dim = inputs.size()
        # Gather the hidden states at the question-token positions.
        idx = positions.unsqueeze(-1).repeat(1, 1, dim)  # [batch, n_pos, dim]
        gathered = torch.gather(inputs, dim=1, index=idx)  # [batch, n_pos, dim]
        # Question-conditioned query vectors for span start / end.
        q_beg = self.query_start_transform(gathered)  # [batch, n_pos, dim]
        q_end = self.query_end_transform(gathered)  # [batch, n_pos, dim]
        # Token-level key representations, transposed for the matmul.
        k_beg = self.start_transform(inputs).permute(0, 2, 1)  # [batch, dim, seq]
        k_end = self.end_transform(inputs).permute(0, 2, 1)  # [batch, dim, seq]
        logits_beg = torch.matmul(self.start_classifier(q_beg), k_beg)
        logits_end = torch.matmul(self.end_classifier(q_end), k_end)
        return logits_beg, logits_end
class ForQA(PreTrained):
    """Question answering head (span selection) on top of the base Model.

    Predicts start/end logits for answer spans with a question-aware span
    selection (QASS) head, Splinter-style: query vectors are taken at the
    question-token position(s) and matched against every token.
    """

    def __init__(self, **kw):
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        self.model = Model(add_pool=False, **kw)
        # QASS head producing (start, end) logits; `forward` previously
        # referenced `self.splinter_qass`, which was never created — fixed
        # by instantiating the head here and using it below.
        self.qass = QuestionAwareSpanSelectionHead(cfg)
        # Id of the special question token used to locate the question when
        # `question_positions` is not given.
        # NOTE(review): assumes the config defines `question_token_id`
        # (Splinter-style) — confirm against the config class.
        self.question_token_id = getattr(cfg, "question_token_id", None)
        self.proj = qc.Linear(cfg.d_model, cfg.n_labels, **kw)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        question_positions=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # When no explicit question positions are supplied, locate the first
        # question token in each row (or position 0 when only embeddings are
        # given); the extra positions dim is squeezed back out below.
        question_positions_were_none = False
        if question_positions is None:
            if input_ids is not None:
                question_position_for_each_example = torch.argmax(
                    (torch.eq(input_ids, self.question_token_id)).int(), dim=-1
                )
            else:
                question_position_for_each_example = torch.zeros(
                    inputs_embeds.size(0),
                    dtype=torch.long,
                    layout=inputs_embeds.layout,
                    device=inputs_embeds.device,
                )
            question_positions = question_position_for_each_example.unsqueeze(-1)
            question_positions_were_none = True
        # was: self.splinter — an attribute never set in __init__; fixed to
        # the backbone created there.
        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        # was: self.splinter_qass — also never set; fixed to self.qass.
        logits_beg, logits_end = self.qass(sequence_output, question_positions)
        if question_positions_were_none:
            logits_beg, logits_end = logits_beg.squeeze(1), logits_end.squeeze(1)
        if attention_mask is not None:
            # Push padded positions to a large negative logit.
            logits_beg = logits_beg + (1 - attention_mask) * -10000.0
            logits_end = logits_end + (1 - attention_mask) * -10000.0
        total_loss = None
        if start_positions is not None and end_positions is not None:
            # Targets may carry a trailing singleton dim (e.g. multi-GPU).
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # Clamp out-of-range targets onto the ignored index.
            ignored_index = logits_beg.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(logits_beg, start_positions)
            end_loss = loss_fct(logits_end, end_positions)
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            output = (logits_beg, logits_end) + outputs[1:]
            return ((total_loss,) + output) if total_loss is not None else output
        return qo.LossQA(
            loss=total_loss,
            logits_beg=logits_beg,
            logits_end=logits_end,
            hiddens=outputs.hiddens,
            attns=outputs.attns,
        )
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,549
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/test/unit/operators/test_blocksparse.py
|
import pytest
import torch
import triton
import triton.ops
def sparsify_tensor(x, mask, block):
    """Compact the `block`-sized tiles of `x` selected by `mask`.

    Args:
        x: dense tensor of shape [batch, heads, rows*block, cols*block].
        mask: [heads, rows, cols] tensor; nonzero entries select tiles.
        block: tile side length.

    Returns:
        Tensor of shape [batch, nnz, block, block] holding the selected
        tiles in row-major order of `mask`'s nonzero entries.
    """
    out = torch.empty((x.size(0), mask.sum(), block, block), dtype=x.dtype, device=x.device)
    for k, (h, r, c) in enumerate(zip(*mask.nonzero(as_tuple=True))):
        rows = slice(r * block, (r + 1) * block)
        cols = slice(c * block, (c + 1) * block)
        out[:, k, :, :] = x[:, h, rows, cols]
    return out
def make_pair(shape, device="cuda", alpha=1e-2, beta=0., trans=False, data=None, dtype=torch.float32):
    """Build a (reference, triton) pair of independent leaf tensors.

    The data is scaled (`alpha`, `beta`), rounded through fp16 so both
    implementations see identical values, optionally transposed, and
    returned as two detached, grad-requiring copies.
    """
    if data is None:
        data = torch.randn(shape, dtype=torch.float32, requires_grad=True, device=device)
    # Scale, then round-trip through half precision before casting to dtype.
    ref = (data * alpha + beta).half().to(dtype)
    if trans:
        ref = ref.t().requires_grad_()
    ref = ref.detach().requires_grad_()
    tri = ref.clone().detach().requires_grad_()
    return ref, tri
def mask_tensor(x, mask, block, value=0):
    """Return a copy of `x` with every tile where `mask` == 0 set to `value`.

    `x` has shape [batch, heads, rows*block, cols*block]; `mask` has shape
    [heads, rows, cols]. The input tensor is not modified.
    """
    out = x.clone()
    zero_tiles = (mask == 0).nonzero(as_tuple=True)
    for h, r, c in zip(*zero_tiles):
        out[:, h, r * block:(r + 1) * block, c * block:(c + 1) * block] = value
    return out
@pytest.mark.parametrize("MODE", ["sdd", "dds", "dsd"])
@pytest.mark.parametrize("TRANS_A", [False, True])
@pytest.mark.parametrize("TRANS_B", [False, True])
@pytest.mark.parametrize("BLOCK", [16, 32, 64])
@pytest.mark.parametrize("DTYPE", [torch.float16])
def test_matmul(MODE, TRANS_A, TRANS_B, BLOCK, DTYPE, Z=3, H=2, M=512, N=384, K=256):
    # Forward + backward check of triton.ops.blocksparse.matmul against a
    # dense torch.matmul reference, for all three sparse modes:
    #   sdd: C sparse, A/B dense; dsd: A sparse; dds: B sparse.
    # NOTE(review): statement order matters — manual_seed followed by
    # randint then randn (inside make_pair) fixes the RNG stream; do not
    # reorder the random calls.
    seed = 0
    torch.manual_seed(seed)
    is_sdd = MODE == "sdd"
    is_dsd = MODE == "dsd"
    is_dds = MODE == "dds"
    # These lambdas close over `layout`, which is assigned below but
    # before their first call (late binding is intentional here).
    do_sparsify = lambda x: sparsify_tensor(x, layout, BLOCK)
    do_mask = lambda x: mask_tensor(x, layout, BLOCK)
    # create inputs
    # create op
    a_shape = (Z, H, K, M) if TRANS_A else (Z, H, M, K)
    b_shape = (Z, H, N, K) if TRANS_B else (Z, H, K, N)
    c_shape = (Z, H, M, N)
    # The sparse operand's (rows, cols) determine the block-layout grid.
    shape = {
        "sdd": (M, N),
        "dsd": (a_shape[2], a_shape[3]),
        "dds": (b_shape[2], b_shape[3]),
    }[MODE]
    layout = torch.randint(2, (H, shape[0] // BLOCK, shape[1] // BLOCK))
    # Zero out one block-row and one block-column of head 1 so the test
    # covers fully-empty rows/columns in the layout.
    layout[1, 2, :] = 0
    layout[1, :, 1] = 0
    # create data
    a_ref, a_tri = make_pair(a_shape, alpha=.1, dtype=DTYPE)
    b_ref, b_tri = make_pair(b_shape, alpha=.1, dtype=DTYPE)
    dc_ref, dc_tri = make_pair(c_shape, dtype=DTYPE)
    # compute [torch]
    # Zero the masked blocks of whichever operand is sparse in this mode.
    dc_ref = do_mask(dc_ref) if is_sdd else dc_ref
    a_ref = do_mask(a_ref) if is_dsd else a_ref
    b_ref = do_mask(b_ref) if is_dds else b_ref
    a_ref.retain_grad()
    b_ref.retain_grad()
    c_ref = torch.matmul(a_ref.transpose(2, 3) if TRANS_A else a_ref,
                         b_ref.transpose(2, 3) if TRANS_B else b_ref)
    c_ref.backward(dc_ref)
    # Compact the reference results into sparse format where applicable.
    c_ref = do_sparsify(c_ref) if is_sdd else c_ref
    da_ref = do_sparsify(a_ref.grad) if is_dsd else a_ref.grad
    db_ref = do_sparsify(b_ref.grad) if is_dds else b_ref.grad
    # triton result
    dc_tri = do_sparsify(dc_tri) if is_sdd else dc_tri
    a_tri = do_sparsify(a_tri) if is_dsd else a_tri
    b_tri = do_sparsify(b_tri) if is_dds else b_tri
    a_tri.retain_grad()
    b_tri.retain_grad()
    op = triton.ops.blocksparse.matmul(layout, BLOCK, MODE, trans_a=TRANS_A, trans_b=TRANS_B, device="cuda")
    c_tri = op(a_tri, b_tri)
    c_tri.backward(dc_tri)
    da_tri = a_tri.grad
    db_tri = b_tri.grad
    # compare
    torch.testing.assert_allclose(c_ref, c_tri)
    torch.testing.assert_allclose(da_ref, da_tri)
    torch.testing.assert_allclose(db_ref, db_tri)
# (BLOCK, WIDTH) pairs exercised by test_softmax; the last two widths are
# deliberately not multiples of their block size.
configs = [
    (16, 256),
    (32, 576),
    (64, 1871),
    (128, 2511),
]
@pytest.mark.parametrize("is_dense", [False, True])
@pytest.mark.parametrize("BLOCK, WIDTH", configs)
def test_softmax(BLOCK, WIDTH, is_dense, Z=2, H=2, is_causal=True, scale=0.4):
    """Check blocksparse softmax (forward and backward) against torch.softmax.

    Fix vs. original: the broadcast causal-mask tensor was named `M`, shadowing
    the sequence-length dimension `M` defined just above it — renamed to
    `causal_mask` for clarity; behavior is unchanged.
    """
    # set seed
    torch.random.manual_seed(0)
    # NOTE(review): the Z and H parameters are immediately overwritten here, so
    # their defaults are dead; kept only for signature compatibility.
    Z, H, M, N = 2, 3, WIDTH, WIDTH
    # initialize layout
    # make sure each row has at least one non-zero element
    layout = torch.randint(2, (H, M // BLOCK, N // BLOCK))
    if is_dense:
        layout[:] = 1
    else:
        # zero out a block-row and a block-column to exercise empty blocks
        layout[1, 2, :] = 0
        layout[1, :, 1] = 0
    # initialize data
    a_shape = (Z, H, M, N)
    a_ref, a_tri = make_pair(a_shape)
    dout_ref, dout_tri = make_pair(a_shape)
    # compute [torch]: fill blocks outside the layout with -inf so softmax ignores them
    a_ref = mask_tensor(a_ref, layout, BLOCK, value=float("-inf"))
    a_ref.retain_grad()
    at_mask = torch.ones((M, N), device="cuda")
    if is_causal:
        at_mask = torch.tril(at_mask)
    # broadcast the (M, N) attention mask to the full (Z, H, M, N) shape
    causal_mask = at_mask[None, None, :, :] + torch.zeros_like(a_ref)
    a_ref[causal_mask == 0] = float("-inf")
    out_ref = torch.softmax(a_ref * scale, -1)
    out_ref.backward(dout_ref)
    out_ref = sparsify_tensor(out_ref, layout, BLOCK)
    da_ref = sparsify_tensor(a_ref.grad, layout, BLOCK)
    # compute [triton]: inputs in compressed blocksparse storage
    a_tri = sparsify_tensor(a_tri, layout, BLOCK)
    a_tri.retain_grad()
    dout_tri = sparsify_tensor(dout_tri, layout, BLOCK)
    op = triton.ops.blocksparse.softmax(layout, BLOCK, device="cuda", is_dense=is_dense)
    out_tri = op(a_tri, scale=scale, is_causal=is_causal)
    out_tri.backward(dout_tri)
    da_tri = a_tri.grad
    # compare output and input gradient
    torch.testing.assert_allclose(out_tri, out_ref)
    torch.testing.assert_allclose(da_tri, da_ref)
@pytest.mark.parametrize("block", [16, 32, 64])
@pytest.mark.parametrize("dtype", [torch.float16, torch.float32])
def test_attention_fwd_bwd(
    block,
    dtype,
    input_scale=1.0,
    scale=1 / 8.0,
    n_ctx=256,
    batch_size=2,
    n_heads=2,
):
    """Forward/backward of blocksparse causal attention vs. a dense torch reference."""
    capability = torch.cuda.get_device_capability()
    if capability[0] < 7:
        pytest.skip("Only test tl.dot() on devices with sm >= 70")
    # three shared random inputs (q, k, v) consumed by both implementations
    qkv_shape = (batch_size, n_heads, n_ctx, 64)
    inputs = [
        torch.nn.Parameter(input_scale * torch.randn(qkv_shape), requires_grad=True).to(dtype).cuda() for _ in range(3)
    ]
    # --- triton path: lower-triangular block layout for causal attention ---
    n_blocks = n_ctx // block
    layout = torch.tril(torch.ones([n_heads, n_blocks, n_blocks], dtype=torch.long))
    query, key, value = (x.clone() for x in inputs)
    for t in (query, key, value):
        t.retain_grad()
    attn_out = triton_attention(layout, block, query=query, key=key, value=value, scale=scale)
    tri_loss = (attn_out ** 2).mean()  # ad hoc scalar loss
    tri_loss.backward()
    tri_grads = [query.grad, key.grad, value.grad]
    # --- torch reference path: dense attention with an additive causal mask ---
    torch_q, torch_k, torch_v = (x.clone() for x in inputs)
    attn_mask = torch.tril(torch.ones([n_ctx, n_ctx], device="cuda", dtype=dtype), diagonal=0)
    attn_mask = 1e6 * (-1 + (attn_mask.reshape((1, 1, n_ctx, n_ctx)).cuda()))
    for t in (torch_q, torch_k, torch_v):
        t.retain_grad()
    scores = scale * torch.einsum("bhsd,bhtd->bhst", torch_q, torch_k) + attn_mask
    probs = torch.softmax(scores, dim=-1)
    torch_attn_out = torch.einsum("bhst,bhtd->bhsd", probs, torch_v)
    torch_loss = (torch_attn_out ** 2).mean()  # same ad hoc loss
    torch_loss.backward()
    torch_grads = [torch_q.grad, torch_k.grad, torch_v.grad]
    # losses and all three input gradients must agree
    torch.testing.assert_allclose(tri_loss, torch_loss, atol=1e-3, rtol=0)
    for tri_g, ref_g in zip(tri_grads, torch_grads):
        torch.testing.assert_allclose(tri_g, ref_g)
def triton_attention(
    layout,
    block: int,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    scale: float,
):
    """Causal blocksparse attention: sparse-softmax(scale * Q @ K^T) @ V.

    `layout` is the (heads, n_blocks, n_blocks) block-sparsity pattern shared by
    the three blocksparse kernels; `block` is their block size.

    Fix vs. original: a stray ``@pytest.mark.parametrize("block", [16, 32, 64])``
    decorator was attached to this helper — removed, since this is not a test
    function and the mark had no effect.
    """
    # Q @ K^T with sparse ("sdd") output, K transposed ("nt")
    sparse_dot_sdd_nt = triton.ops.blocksparse.matmul(layout, block, "sdd", trans_a=False, trans_b=True, device=value.device)
    # probs @ V with sparse ("dsd") left operand, no transposes ("nn")
    sparse_dot_dsd_nn = triton.ops.blocksparse.matmul(layout, block, "dsd", trans_a=False, trans_b=False, device=value.device)
    sparse_softmax = triton.ops.blocksparse.softmax(layout, block, device=value.device)
    w = sparse_dot_sdd_nt(query, key)
    w = sparse_softmax(w, scale=scale, is_causal=True)
    a = sparse_dot_dsd_nn(w, value)
    return a
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,550
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/test/unit/operators/test_cross_entropy.py
|
import pytest
import torch
import triton
import triton.ops
@pytest.mark.parametrize("M, N, dtype, mode",
                         [
                             (M, N, dtype, mode) for M in [1024, 821]
                             for N in [512, 857, 1871, 2089, 8573, 31000]
                             for dtype in ['float16', 'float32']
                             for mode in ['forward', 'backward']
                         ]
                         )
def test_op(M, N, dtype, mode):
    """Compare triton's cross_entropy against torch.nn.CrossEntropyLoss."""
    capability = torch.cuda.get_device_capability()
    # NOTE: bfloat16 is not in the parametrization above, so this guard is inert
    if capability[0] < 8 and dtype == "bfloat16":
        pytest.skip("Only test bfloat16 on devices with sm >= 80")
    torch_dtypes = {'bfloat16': torch.bfloat16, 'float16': torch.float16, 'float32': torch.float32}
    dtype = torch_dtypes[dtype]
    # logits plus a constant target index (every row targets class 5)
    x = torch.randn(M, N, dtype=dtype, device='cuda', requires_grad=True)
    idx = 4 + torch.ones(M, dtype=torch.int64, device='cuda')
    # run both forward passes up front; backward mode reuses them
    tri_y = triton.ops.cross_entropy(x, idx)
    ref_y = torch.nn.CrossEntropyLoss(reduction="none")(x, idx)
    if mode == 'forward':
        torch.testing.assert_allclose(ref_y, tri_y)
    # backward pass
    elif mode == 'backward':
        grad_out = torch.randn_like(tri_y)
        # triton backward
        tri_y.backward(grad_out)
        tri_dx = x.grad.clone()
        # torch backward — clear the grad accumulated by the triton pass first
        x.grad.zero_()
        ref_y.backward(grad_out)
        ref_dx = x.grad.clone()
        torch.testing.assert_allclose(ref_dx, tri_dx)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,551
|
quantapix/qnarre
|
refs/heads/main
|
/tools/triton/python/triton/tools/build_extern.py
|
import argparse
import subprocess
from abc import ABC, abstractmethod
from typing import Dict, List, Optional
class Symbol:
    """An extern-function declaration: symbol name, op name, return type,
    and the names/types of its arguments."""

    _name: str
    _op_name: str
    _ret_type: str
    _arg_names: List[str]
    _arg_types: List[str]

    def __init__(
        self,
        name: str,
        op_name: str,
        ret_type: str,
        arg_names: List[str],
        arg_types: List[str],
    ) -> None:
        """Create a symbol (a function declaration).

        :param name: name of the symbol
        :param op_name: name of the operation
        :param ret_type: return type of the operation
        :param arg_names: names of the arguments
        :param arg_types: types of the arguments
        """
        self._name = name
        self._op_name = op_name
        self._ret_type = ret_type
        # Defensive copies: later mutation of the caller's lists must not
        # leak into this symbol.
        self._arg_names = list(arg_names)
        self._arg_types = list(arg_types)

    @property
    def name(self) -> str:
        """Name of the symbol."""
        return self._name

    @property
    def op_name(self) -> str:
        """Name of the operation."""
        return self._op_name

    @property
    def ret_type(self) -> str:
        """Return type of the operation."""
        return self._ret_type

    @property
    def arg_names(self) -> List[str]:
        """Names of the arguments."""
        return self._arg_names

    @property
    def arg_types(self) -> List[str]:
        """Types of the arguments."""
        return self._arg_types
def convert_type(type_str) -> Optional[str]:
    """Map an LLVM IR scalar type name to the corresponding triton dtype name.

    :param type_str: LLVM type token, e.g. ``"i32"`` or ``"double"``
    :return: the triton dtype name, or ``None`` for unsupported types
             (such as pointer types) — callers use ``None`` to skip the symbol
    """
    # A single dict lookup replaces the original if/elif chain.
    return {
        "i32": "int32",
        "u32": "uint32",
        "i64": "int64",
        "u64": "uint64",
        "float": "fp32",
        "double": "fp64",
    }.get(type_str)  # .get default is None: ignore other types, e.g. pointers
def to_unsigned(type_str) -> str:
    """Return the unsigned counterpart of a signed integer dtype name.

    Any other type name (already unsigned, or non-integer) is returned
    unchanged.

    :param type_str: triton dtype name, e.g. ``"int32"``
    :return: ``"uint32"``/``"uint64"`` for the signed ints, else ``type_str``
    """
    return {"int32": "uint32", "int64": "uint64"}.get(type_str, type_str)
class ExternLibrary(ABC):
    """Abstract base class for an external device library.

    Subclasses parse symbols from a disassembled module and emit a python
    stub file exposing those symbols.
    """

    _name: str
    _path: str
    # Forward reference in quotes: Symbol is defined earlier in this module,
    # string form keeps the class importable in isolation.
    _symbols: Dict[str, "Symbol"]
    _format: bool
    _grouping: bool

    def __init__(
        self,
        name: str,
        path: str,
        format: bool = True,
        grouping: bool = True,
    ) -> None:
        """Abstract class for extern library.

        :param name: name of the library
        :param path: path of the library
        :param format: whether to format the generated stub file
        :param grouping: whether symbols sharing an op name are grouped
        """
        self._name = name
        self._path = path
        self._symbols = {}
        self._format = format
        self._grouping = grouping

    @property
    def name(self) -> str:
        """Name of the library."""
        return self._name

    @property
    def path(self) -> str:
        """Path of the library."""
        return self._path

    @property
    def symbols(self) -> "Dict[str, Symbol]":
        """Parsed symbols, keyed by symbol name."""
        return self._symbols

    @property
    def grouping(self) -> bool:
        """Whether symbols sharing an op name are grouped."""
        return self._grouping

    @abstractmethod
    def parse_symbols(self, input_file) -> None:
        """Populate ``self._symbols`` from the given disassembled input file."""

    @abstractmethod
    def _output_stubs(self) -> str:
        """Return the full text of the generated python stub module."""

    def generate_stub_file(self, output_dir) -> None:
        """Write the generated stubs to ``<output_dir>/<name>.py``.

        When formatting is enabled, runs autopep8 and isort over the result.

        :raises Exception: if the stub generator produced no output
        """
        file_str = self._output_stubs()
        if file_str is None or len(file_str) == 0:
            raise Exception("file_str is empty")
        output_file = f"{output_dir}/{self._name}.py"
        # Fix: the original called f.close() inside the `with` block, which is
        # redundant — the context manager already closes the file.
        with open(output_file, "w") as f:
            f.write(file_str)
        if self._format:
            subprocess.Popen(["autopep8", "-a", "-r", "-i", output_file],
                             stdout=subprocess.PIPE).communicate()
            subprocess.Popen(["isort", output_file], stdout=subprocess.PIPE).communicate()
class Libdevice(ExternLibrary):
    """Extern-library implementation for NVIDIA's libdevice bitcode module.

    Parses the disassembled LLVM IR of libdevice, groups overloaded
    symbols under a common op name, and emits triton python stubs that
    dispatch on argument dtypes.
    """

    # op name -> all symbols (overloads) that implement that op
    _symbol_groups: Dict[str, List[Symbol]]

    def __init__(self, path) -> None:
        '''
        Constructor for Libdevice.
        :param path: path of the libdevice library
        '''
        super().__init__("libdevice", path)
        self._symbol_groups = {}
        # Stubs are emitted with is_pure=True (see _output_stubs).
        self.is_pure = True

    @staticmethod
    def _extract_symbol(line) -> Optional[Symbol]:
        # Extract symbols from line in the following format:
        # "define [internal] <ret_type> @<name>(<arg_types>,)"
        entries = line.split("@")
        ret_str = entries[0]
        func_str = entries[1]
        # Get ret_type, skip internal symbols
        ret_strs = ret_str.split()
        if ret_strs[1] == "internal":
            return None
        ret_type = convert_type(ret_strs[1])
        if ret_type is None:
            # Unsupported (e.g. pointer) return type: skip this symbol.
            return None
        # Get function name
        func_strs = func_str.split("(")
        func_name = func_strs[0].replace("@", "")
        op_name = func_name.replace("__nv_", "")
        # Skip IEEE-rounding variants; they are not exposed as stubs.
        if 'ieee' in op_name:
            return None
        # Get arg_types
        arg_strs = func_strs[1].split(",")
        arg_types = []
        arg_names = []
        for i, arg_str in enumerate(arg_strs):
            arg_type = convert_type(arg_str.split()[0])
            if arg_type is None:
                # Any unsupported argument type disqualifies the symbol.
                return None
            arg_name = 'arg' + str(i)
            arg_types.append(arg_type)
            arg_names.append(arg_name)
        if op_name == "sad":
            # Special case for sad, where the last argument is an unsigned int
            arg_types[-1] = to_unsigned(arg_types[-1])
        elif op_name.startswith("u"):
            # LLVM does not differentiate between signed and unsigned integer type.
            # We have to convert the types to unsigned
            ret_type = to_unsigned(ret_type)
            for i, arg_type in enumerate(arg_types):
                arg_types[i] = to_unsigned(arg_type)
        return Symbol(func_name, op_name, ret_type, arg_names, arg_types)

    def _group_symbols(self) -> None:
        """Group parsed symbols by op name, merging renamed overloads
        (e.g. 'fmaxf'/'fmax'/'umax' all collapse into 'max')."""
        # NOTE(review): symbol_set is populated but never read afterwards —
        # this loop looks like dead code; confirm before removing.
        symbol_set = {}
        for symbol in self._symbols.values():
            op_name = symbol.op_name
            symbol_set[op_name] = symbol
        # Group functions together by renaming.
        renaming = {
            'llabs': 'abs', 'acosf': 'acos', 'acoshf': 'acosh',
            'dadd_rd': 'add_rd', 'fadd_rd': 'add_rd', 'dadd_rn': 'add_rn',
            'fadd_rn': 'add_rn', 'dadd_ru': 'add_ru', 'fadd_ru': 'add_ru',
            'dadd_rz': 'add_rz', 'fadd_rz': 'add_rz', 'asinf': 'asin',
            'asinhf': 'asinh', 'atanf': 'atan', 'atan2f': 'atan2',
            'atanhf': 'atanh', 'brevll': 'brev', 'cbrtf': 'cbrt',
            'ceilf': 'ceil', 'clzll': 'clz', 'copysignf': 'copysign',
            'cosf': 'cos', 'coshf': 'cosh', 'cospif': 'cospi',
            'cyl_bessel_i0f': 'cyl_bessel_i0', 'cyl_bessel_i1f': 'cyl_bessel_i1',
            'fdiv_rd': 'div_rd', 'ddiv_rd': 'div_rd', 'fdiv_rn': 'div_rn',
            'ddiv_rn': 'div_rn', 'fdiv_ru': 'div_ru', 'ddiv_ru': 'div_ru',
            'fdiv_rz': 'div_rz', 'ddiv_rz': 'div_rz', 'erff': 'erf',
            'erfcf': 'erfc', 'erfcinvf': 'erfcinv', 'erfcxf': 'erfcx',
            'erfinvf': 'erfinv', 'expf': 'exp', 'exp10f': 'exp10',
            'exp2f': 'exp2', 'expm1f': 'expm1', 'fabsf': 'abs',
            'fabs': 'abs', 'fast_fdividef': 'fast_dividef',
            'fdimf': 'fdim', 'ffsll': 'ffs', 'floorf': 'floor',
            'fmaf': 'fma', 'fmaf_rd': 'fma_rd', 'fmaf_rn': 'fma_rn',
            'fmaf_ru': 'fma_ru', 'fmaf_rz': 'fma_rz', 'fmodf': 'fmod',
            'uhadd': 'hadd', 'hypotf': 'hypot', 'ilogbf': 'ilogb',
            'isinff': 'isinf', 'isinfd': 'isinf', 'isnanf': 'isnan',
            'isnand': 'isnan', 'j0f': 'j0', 'j1f': 'j1', 'jnf': 'jn',
            'ldexpf': 'ldexp', 'lgammaf': 'lgamma', 'llrintf': 'llrint',
            'llroundf': 'llround', 'logf': 'log', 'log10f': 'log10',
            'log1pf': 'log1p', 'log2f': 'log2', 'logbf': 'logb',
            'umax': 'max', 'llmax': 'max', 'ullmax': 'max', 'fmaxf': 'max',
            'fmax': 'max', 'umin': 'min', 'llmin': 'min', 'ullmin': 'min',
            'fminf': 'min', 'fmin': 'min', 'dmul_rd': 'mul_rd', 'fmul_rd': 'mul_rd',
            'dmul_rn': 'mul_rn', 'fmul_rn': 'mul_rn', 'dmul_ru': 'mul_ru',
            'fmul_ru': 'mul_ru', 'dmul_rz': 'mul_rz', 'fmul_rz': 'mul_rz',
            'umul24': 'mul24', 'umulhi': 'mulhi', 'mul64hi': 'mulhi',
            'umul64hi': 'mulhi', 'nearbyintf': 'nearbyint', 'nextafterf': 'nextafter',
            'norm3df': 'norm3d', 'norm4df': 'norm4d', 'normcdff': 'normcdf',
            'normcdfinvf': 'normcdfinv', 'popcll': 'popc', 'powif': 'pow', 'powi': 'pow',
            'powf': 'pow', 'rcbrtf': 'rcbrt', 'frcp_rd': 'rcp_rd', 'drcp_rd': 'rcp_rd',
            'frcp_rn': 'rcp_rn', 'drcp_rn': 'rcp_rn', 'frcp_ru': 'rcp_ru',
            'drcp_ru': 'rcp_ru', 'frcp_rz': 'rcp_rz', 'drcp_rz': 'rcp_rz',
            'remainderf': 'remainder', 'urhadd': 'rhadd', 'rhypotf': 'rhypot',
            'rintf': 'rint', 'rnorm3df': 'rnorm3d', 'rnorm4df': 'rnorm4d',
            'roundf': 'round', 'rsqrtf': 'rsqrt', 'frsqrt_rn': 'rsqrt_rn',
            'usad': 'sad', 'scalbnf': 'scalbn', 'signbitf': 'signbit',
            'signbitd': 'signbit', 'sinf': 'sin', 'sinhf': 'sinh',
            'sinpif': 'sinpi', 'sqrtf': 'sqrt', 'fsqrt_rd': 'sqrt_rd',
            'dsqrt_rd': 'sqrt_rd', 'fsqrt_rn': 'sqrt_rn', 'dsqrt_rn': 'sqrt_rn',
            'fsqrt_ru': 'sqrt_ru', 'dsqrt_ru': 'sqrt_ru', 'fsqrt_rz': 'sqrt_rz',
            'dsqrt_rz': 'sqrt_rz', 'fsub_rd': 'sub_rd', 'dsub_rd': 'sub_rd',
            'fsub_rn': 'sub_rn', 'dsub_rn': 'sub_rn', 'fsub_ru': 'sub_ru',
            'dsub_ru': 'sub_ru', 'fsub_rz': 'sub_rz', 'dsub_rz': 'sub_rz',
            'tanf': 'tan', 'tanhf': 'tanh', 'tgammaf': 'tgamma', 'truncf': 'trunc',
            'y0f': 'y0', 'y1f': 'y1', 'ynf': 'yn'
        }
        for symbol in self._symbols.values():
            op_name = symbol.op_name
            if op_name in renaming:
                op_name = renaming[op_name]
                # Mutates the symbol so subsequent passes see the grouped name.
                symbol._op_name = op_name
            if op_name in self._symbol_groups:
                self._symbol_groups[op_name].append(symbol)
            else:
                self._symbol_groups[op_name] = [symbol]

    def parse_symbols(self, input_file) -> None:
        """Parse all `define` lines of the disassembled IR in *input_file*
        into self._symbols, then group them.

        Idempotent: a second call on an already-populated library is a no-op.
        """
        if len(self.symbols) > 0:
            return
        # grep keeps only the function-definition lines of the .ll file.
        output = subprocess.check_output(["grep", "define", input_file]).decode().splitlines()
        for line in output:
            symbol = self._extract_symbol(line)
            if symbol is None:
                continue
            self._symbols[symbol.name] = symbol
        self._group_symbols()

    def _output_stubs(self) -> str:
        # Generate python functions in the following format:
        # @extern.extern
        # def <op_name>(<args>, _builder=None):
        #     arg_type_symbol_dict = {[arg_type]: {(symbol, ret_type)}}
        #     return core.extern_elementwise("libdevice", <path>, <args>, <arg_type_symbol_dict>, _builder)
        import_str = "from . import core\n"
        import_str += "import os\n"
        import_str += "import functools\n"
        # Header: a cached helper in the generated module that locates the
        # libdevice bitcode file at runtime.
        header_str = ""
        header_str += "@functools.lru_cache()\n"
        header_str += "def libdevice_path():\n"
        header_str += "    import torch\n"
        header_str += "    third_party_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", \"third_party\")\n"
        header_str += "    if torch.version.hip is None:\n"
        header_str += "        default = os.path.join(third_party_dir, \"cuda\", \"lib\", \"libdevice.10.bc\")\n"
        header_str += "    else:\n"
        header_str += "        default = ''\n"
        header_str += "    return os.getenv(\"TRITON_LIBDEVICE_PATH\", default)\n"
        func_str = ""
        # One stub per op group; the dtype->symbol dict dispatches overloads.
        for symbols in self._symbol_groups.values():
            func_str += "@core.extern\n"
            func_name_str = f"def {symbols[0].op_name}("
            for arg_name in symbols[0].arg_names:
                func_name_str += f"{arg_name}, "
            func_name_str += "_builder=None):\n"
            return_str = f"\treturn core.extern_elementwise(\"{self._name}\", libdevice_path(), ["
            for arg_name in symbols[0].arg_names:
                return_str += f"{arg_name}, "
            return_str += "], \n"
            arg_type_symbol_dict_str = "{"
            for symbol in symbols:
                arg_type_symbol_dict_str += "("
                for arg_type in symbol.arg_types:
                    arg_type_symbol_dict_str += f'core.dtype("{arg_type}"),'
                ret_type = f'core.dtype("{symbol.ret_type}")'
                arg_type_symbol_dict_str += "): (\"" + symbol.name + "\", " + ret_type + "),\n"
            arg_type_symbol_dict_str += "}"
            return_str += arg_type_symbol_dict_str
            return_str += f", is_pure={self.is_pure}"
            return_str += ", _builder=_builder)\n"
            func_str += func_name_str + return_str + "\n"
        file_str = import_str + header_str + func_str
        return file_str
class LLVMDisassembler:
    """Thin wrapper around the ``llvm-dis`` binary.

    Disassembles an LLVM bitcode library into a textual ``.ll`` file at a
    fixed temporary location.
    """

    _path: str
    _ll_file: str

    def __init__(self, path) -> None:
        """Remember where the llvm-dis binary lives.

        :param path: path to llvm-dis
        """
        self._path = path
        self._ll_file = "/tmp/extern_lib.ll"

    @property
    def path(self) -> str:
        """Path to the llvm-dis binary."""
        return self._path

    @property
    def ll_file(self) -> str:
        """Path of the generated .ll output file."""
        return self._ll_file

    def disasm(self, lib_path: str) -> None:
        """Run llvm-dis on *lib_path*, writing the result to ``self.ll_file``."""
        command = [self._path, lib_path, "-o", self.ll_file]
        process = subprocess.Popen(command, stdout=subprocess.PIPE)
        process.communicate()
# Names of the extern libraries this tool knows how to build.
extern_libs = ["libdevice"]


def build(
    llvm_dis_path: str,
    lib_path: str,
    lib_name: str,
    output_dir: str,
) -> None:
    """Build the stub file for an external library.

    :param llvm_dis_path: path to the llvm-dis binary
    :param lib_path: path to the external library file
    :param lib_name: name of the library
    :param output_dir: path to the output directory
    :raises Exception: if *lib_name* is not a supported library
    """
    # Guard clause replaces the original if/else: only libdevice is supported.
    if lib_name != "libdevice":
        raise Exception(f"Unknown extern library: {lib_name}")
    extern_lib = Libdevice(lib_path)
    disassembler = LLVMDisassembler(llvm_dis_path)
    disassembler.disasm(lib_path)
    extern_lib.parse_symbols(disassembler.ll_file)
    extern_lib.generate_stub_file(output_dir)
if __name__ == "__main__":
    # CLI entry point: disassemble an extern library and emit its stub module.
    cli = argparse.ArgumentParser()
    cli.add_argument("--llvm-dis", dest="llvm_dis_path", help="Path to llvm-dis", default="llvm-dis")
    cli.add_argument("--lib-path", dest="lib_path", help="Path to the extern library")
    cli.add_argument("--lib-name", dest="lib_name", help="Name of the extern library")
    cli.add_argument("--output", dest="output_dir", help="Output file path", default="/tmp/")
    opts = cli.parse_args()
    build(opts.llvm_dis_path, opts.lib_path, opts.lib_name, opts.output_dir)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,552
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/try/07-math-functions.py
|
"""
Libdevice (`tl.math`) function
==============================
Triton can invoke a custom function from an external library.
In this example, we will use the `libdevice` library (a.k.a `math` in triton) to apply `asin` on a tensor.
Please refer to https://docs.nvidia.com/cuda/libdevice-users-guide/index.html regarding the semantics of all available libdevice functions.
In `triton/language/math.py`, we try to aggregate functions with the same computation but different data types together.
For example, both `__nv_asin` and `__nv_asinf` calculate the principal value of the arc sine of the input, but `__nv_asin` operates on `double` and `__nv_asinf` operates on `float`.
Using triton, you can simply call `tl.math.asin`.
Triton automatically selects the correct underlying device function to invoke based on input and output types.
"""
# %%
# asin Kernel
# ------------
import torch
import triton
import triton.language as tl
@triton.jit
def asin_kernel(
    x_ptr,
    y_ptr,
    n_elements,
    BLOCK_SIZE: tl.constexpr,
):
    """Elementwise arc sine: writes y[i] = asin(x[i]) for this program's block.

    Each program instance owns one contiguous BLOCK_SIZE-sized slice of the
    input; out-of-range lanes in the last slice are masked off.
    """
    block_id = tl.program_id(axis=0)
    start = block_id * BLOCK_SIZE
    offs = start + tl.arange(0, BLOCK_SIZE)
    in_bounds = offs < n_elements  # guard the final, possibly partial block
    vals = tl.load(x_ptr + offs, mask=in_bounds)
    # `tl.math.asin` dispatches to the libdevice asin matching the dtype.
    tl.store(y_ptr + offs, tl.math.asin(vals), mask=in_bounds)
# %%
# Using the default libdevice library path
# -----------------------------------------
# The default libdevice bitcode path is baked into `triton/language/math.py`,
# so no extra configuration is needed here.
torch.manual_seed(0)  # deterministic inputs so both launches are comparable
count = 98432
x = torch.rand(count, device='cuda')
output_triton = torch.zeros(count, device='cuda')
output_torch = torch.asin(x)  # reference result computed by PyTorch
assert x.is_cuda and output_triton.is_cuda
n_elements = output_torch.numel()


# One kernel instance ("program") per BLOCK_SIZE-sized slice of the input.
def grid(meta):
    return (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)


asin_kernel[grid](x, output_triton, n_elements, BLOCK_SIZE=1024)
print(output_torch)
print(output_triton)
max_diff = torch.max(torch.abs(output_torch - output_triton))
print(f'The maximum difference between torch and triton is {max_diff}')

# %%
# Customize the libdevice library path
# -------------------------------------
# The same kernel can instead be pointed at an explicit libdevice bitcode
# file via the `extern_libs` launch argument.
output_triton = torch.empty_like(x)
asin_kernel[grid](
    x,
    output_triton,
    n_elements,
    BLOCK_SIZE=1024,
    extern_libs={'libdevice': '/usr/local/cuda/nvvm/libdevice/libdevice.10.bc'},
)
print(output_torch)
print(output_triton)
max_diff = torch.max(torch.abs(output_torch - output_triton))
print(f'The maximum difference between torch and triton is {max_diff}')
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,553
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/models/rag.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from dataclasses import dataclass
from typing import Any

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import functional as F
from torch.utils.checkpoint import checkpoint
from transformers.utils import logging

from .. import core as qc
from ..core import utils as qu
from ..core import output as qo
from ..core import attention as qa
from ..core.embed import Embed
from ..core.mlp import Classifier, MLP, Predictor, Pool
from ..prep.config.bert import PreTrained
from ...generation_beam_search import BeamSearchScorer
from ...generation_logits_process import LogitsProcessorList
from ...generation_stopping_criteria import StoppingCriteriaList
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
log = logging.get_logger(__name__)
@dataclass
class RetrievAugLMMarginOutput(ModelOutput):
    """Output of a retrieval-augmented LM forward pass including the marginalized loss.

    Bundles the generator logits and (optional) loss together with everything the
    retrieval step produced — document scores, retrieved embeddings/ids, the
    document-contextualized inputs — plus intermediate encoder/decoder states.
    Which fields are populated depends on the `output_*` flags passed to forward().
    """

    # NOTE: the type annotations below are load-bearing, not cosmetic.
    # @dataclass only turns *annotated* class attributes into fields; the
    # original unannotated version produced a dataclass with no fields, so
    # the keyword construction used by the forward() methods in this module
    # (RetrievAugLMMarginOutput(loss=..., logits=..., ...)) had no generated
    # parameters to bind to.
    loss: Any = None  # marginalized NLL; only set when labels were supplied
    logits: Any = None  # generator token scores (possibly doc-marginalized)
    doc_scores: Any = None  # retrieval scores per document
    caches: Any = None  # generator past key/values for incremental decoding
    retrieved_doc_embeds: Any = None
    retrieved_doc_ids: Any = None
    context_input_ids: Any = None  # query merged with each retrieved doc
    context_attention_mask: Any = None
    question_encoder_last_hidden_state: Any = None
    question_enc_hidden_states: Any = None
    question_enc_attentions: Any = None
    generator_enc_last_hidden_state: Any = None
    generator_enc_hidden_states: Any = None
    generator_enc_attentions: Any = None
    generator_dec_hidden_states: Any = None
    generator_dec_attentions: Any = None
    generator_cross_attentions: Any = None
@dataclass
class RetrievAugLMOutput(ModelOutput):
    """Output of the base RAG forward pass (no loss, otherwise mirrors
    RetrievAugLMMarginOutput): generator logits plus the retrieval context
    and intermediate encoder/decoder states.
    """

    # NOTE: annotations are required — @dataclass only promotes *annotated*
    # class attributes to fields.  Without them the generated __init__ took
    # no keyword arguments, so Model.forward()'s
    # RetrievAugLMOutput(logits=..., ...) construction had nothing to bind to.
    logits: Any = None  # generator token scores
    doc_scores: Any = None  # retrieval scores per document
    caches: Any = None  # generator past key/values
    retrieved_doc_embeds: Any = None
    retrieved_doc_ids: Any = None
    context_input_ids: Any = None  # query merged with each retrieved doc
    context_attention_mask: Any = None
    question_encoder_last_hidden_state: Any = None
    question_enc_hidden_states: Any = None
    question_enc_attentions: Any = None
    generator_enc_last_hidden_state: Any = None
    generator_enc_hidden_states: Any = None
    generator_enc_attentions: Any = None
    generator_dec_hidden_states: Any = None
    generator_dec_attentions: Any = None
    generator_cross_attentions: Any = None
class Model(PreTrained):
    """Base retrieval-augmented generation model.

    Composes a question encoder, an optional retriever, and a seq2seq
    generator.  forward() retrieves supporting documents when the caller did
    not supply pre-computed `context_input_ids` / `context_attention_mask` /
    `doc_scores`, scores the documents against the question encoding, and
    runs the generator over the document-augmented inputs.
    """

    def __init__(
        self,
        config=None,
        question_encoder=None,
        generator=None,
        retriever=None,
        **kw,
    ):
        # Either a full config or both sub-models must be given; a missing
        # config is derived from the two sub-model configs.
        assert config is not None or (question_encoder is not None and generator is not None)
        if config is None:
            config = RagConfig.from_question_encoder_generator_configs(
                question_encoder.config, generator.config, **kw
            )
        else:
            assert isinstance(config, self.config_class)
        super().__init__(config)
        if question_encoder is None:
            from ..auto.modeling_auto import AutoModel

            question_encoder = AutoModel.from_config(config.question_encoder)
        if generator is None:
            from ..auto.modeling_auto import AutoModelForSeq2SeqLM

            generator = AutoModelForSeq2SeqLM.from_config(config.generator)
        # Fixed: the original assigned self.retriever and then re-assigned the
        # same value under `if self.retriever is not None:` — a no-op guard.
        self.retriever = retriever
        self.question_encoder = question_encoder
        self.generator = generator
        # ctx_encoder is only populated (via the *ForGeneration wrappers) when
        # the document/context encoder is trained end-to-end.
        self.ctx_encoder = None
        self.context_encoder_training = False

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        encoder_outputs=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        caches=None,
        doc_scores=None,
        context_input_ids=None,
        context_attention_mask=None,
        y_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        output_retrieved=None,
        n_docs=None,
    ):
        """Retrieve documents (if needed), score them, and run the generator.

        Returns a RetrievAugLMOutput; retrieval-related fields are only
        populated when retrieval actually ran and `output_retrieved` is set.
        """
        # Fall back to config defaults for every unset option.
        n_docs = n_docs if n_docs is not None else self.config.n_docs
        y_cache = y_cache if y_cache is not None else self.config.y_cache
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        output_retrieved = (
            output_retrieved if output_retrieved is not None else self.config.output_retrieved
        )
        # whether retriever has to be used: only when no pre-computed document
        # context was passed and we are not resuming from encoder_outputs
        has_to_retrieve = (
            self.retriever is not None
            and (context_input_ids is None or context_attention_mask is None or doc_scores is None)
            and encoder_outputs is None
        )
        # encoder_outputs are pre-computed during RAG-token generation
        if encoder_outputs is None:
            if has_to_retrieve:
                question_enc_outputs = self.question_encoder(
                    input_ids, attention_mask=attention_mask, return_dict=True
                )
                question_encoder_last_hidden_state = question_enc_outputs[
                    0
                ]  # hidden states of question encoder
                # the retriever consumes CPU float32 numpy arrays
                retriever_outputs = self.retriever(
                    input_ids,
                    question_encoder_last_hidden_state.cpu().detach().to(torch.float32).numpy(),
                    prefix=self.generator.config.prefix,
                    n_docs=n_docs,
                    return_tensors="pt",
                )
                if self.context_encoder_training:
                    # re-embed the retrieved docs with the trainable context
                    # encoder so gradients can flow into retrieval
                    # (fixed local-name typos: retrived_* -> retrieved_*)
                    (
                        context_input_ids,
                        context_attention_mask,
                        retrieved_doc_embeds,
                        retrieved_doc_input_ids,
                        retrieved_doc_attention_mask,
                        retrieved_doc_ids,
                    ) = (
                        retriever_outputs["context_input_ids"],
                        retriever_outputs["context_attention_mask"],
                        retriever_outputs["retrieved_doc_embeds"],
                        retriever_outputs["tokenized_doc_ids"],
                        retriever_outputs["tokenized_doc_attention_mask"],
                        retriever_outputs["doc_ids"],
                    )
                    context_input_ids = context_input_ids.to(input_ids)
                    context_attention_mask = context_attention_mask.to(input_ids)
                    retrieved_doc_input_ids = retrieved_doc_input_ids.to(input_ids)
                    retrieved_doc_attention_mask = retrieved_doc_attention_mask.to(input_ids)
                    retrieved_doc_embeds = self.ctx_encoder(
                        retrieved_doc_input_ids,
                        attention_mask=retrieved_doc_attention_mask,
                        return_dict=True,
                    ).pools
                    retrieved_doc_embeds = retrieved_doc_embeds.view(
                        -1, n_docs, question_encoder_last_hidden_state.shape[1]
                    )  # reshaping
                    # compute doc_scores involving ctx_encoder
                    doc_scores = torch.bmm(
                        question_encoder_last_hidden_state.unsqueeze(1),
                        retrieved_doc_embeds.transpose(1, 2),
                    ).squeeze(1)
                else:
                    (
                        context_input_ids,
                        context_attention_mask,
                        retrieved_doc_embeds,
                        retrieved_doc_ids,
                    ) = (
                        retriever_outputs["context_input_ids"],
                        retriever_outputs["context_attention_mask"],
                        retriever_outputs["retrieved_doc_embeds"],
                        retriever_outputs["doc_ids"],
                    )
                    # set to correct device
                    retrieved_doc_embeds = retrieved_doc_embeds.to(
                        question_encoder_last_hidden_state
                    )
                    context_input_ids = context_input_ids.to(input_ids)
                    context_attention_mask = context_attention_mask.to(input_ids)
                    # compute doc_scores
                    doc_scores = torch.bmm(
                        question_encoder_last_hidden_state.unsqueeze(1),
                        retrieved_doc_embeds.transpose(1, 2),
                    ).squeeze(1)
            else:
                # no retrieval: the caller must have supplied the full context
                assert context_input_ids is not None
                assert context_attention_mask is not None
                assert doc_scores is not None
        assert doc_scores is not None
        assert (doc_scores.shape[1] % n_docs) == 0
        # Decoder input without context documents: replicate per document so
        # each (query, doc) pair decodes against the same targets.
        if decoder_input_ids is not None:
            decoder_input_ids = decoder_input_ids.repeat_interleave(n_docs, dim=0)
        if decoder_attention_mask is not None:
            decoder_attention_mask = decoder_attention_mask.repeat_interleave(n_docs, dim=0)
        gen_outputs = self.generator(
            input_ids=context_input_ids,
            attention_mask=context_attention_mask,
            encoder_outputs=encoder_outputs,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            caches=caches,
            y_cache=y_cache,
            output_attentions=output_attentions,
            return_dict=True,
        )
        if not has_to_retrieve:
            question_encoder_last_hidden_state = None
            question_enc_hidden_states = None
            question_enc_attentions = None
            retrieved_doc_embeds = None
            retrieved_doc_ids = None
        else:
            question_enc_hidden_states = question_enc_outputs.hiddens
            question_enc_attentions = question_enc_outputs.attns
        if not has_to_retrieve or not output_retrieved:
            # don't output retrieved docs
            # Fixed: context_input_ids was set to the 1-tuple (None,) here,
            # inconsistent with its three sibling fields which are set to None.
            context_input_ids = None
            context_attention_mask = None
            retrieved_doc_embeds = None
            retrieved_doc_ids = None
        return RetrievAugLMOutput(
            logits=gen_outputs.logits,
            doc_scores=doc_scores,
            caches=gen_outputs.caches,
            context_input_ids=context_input_ids,
            context_attention_mask=context_attention_mask,
            retrieved_doc_embeds=retrieved_doc_embeds,
            retrieved_doc_ids=retrieved_doc_ids,
            question_encoder_last_hidden_state=question_encoder_last_hidden_state,
            question_enc_hidden_states=question_enc_hidden_states,
            question_enc_attentions=question_enc_attentions,
            generator_enc_last_hidden_state=gen_outputs.enc_y,
            generator_enc_hidden_states=gen_outputs.enc_hiddens,
            generator_enc_attentions=gen_outputs.enc_attns,
            generator_dec_hidden_states=gen_outputs.hiddens,
            generator_dec_attentions=gen_outputs.attns,
            generator_cross_attentions=gen_outputs.crosses,
        )
class RagSequenceForGeneration(PreTrained):
    """RAG-sequence model: marginalizes over retrieved documents per *sequence*.

    Wraps the base RAG Model and adds (a) a sequence-level negative
    log-likelihood (get_nll) that logsumexp-marginalizes document scores over
    whole output sequences, and (b) a two-stage generate() that first decodes
    candidates per retrieved document and then rescores them with the full
    model.
    """

    def __init__(
        self,
        config=None,
        question_encoder=None,
        generator=None,
        retriever=None,
        **kw,
    ):
        # Same contract as Model: either a config, or both sub-models from
        # which a config can be derived.
        assert config is not None or (question_encoder is not None and generator is not None)
        if config is None:
            config = RagConfig.from_question_encoder_generator_configs(
                question_encoder.config, generator.config, **kw
            )
        super().__init__(config)
        self.rag = Model(
            config=config,
            question_encoder=question_encoder,
            generator=generator,
            retriever=retriever,
        )

    def set_retriever(self, retriever: RagRetriever):
        """Attach/replace the retriever after construction."""
        self.rag.retriever = retriever

    def set_context_encoder_for_training(self, ctx_encoder):
        """Enable end-to-end training of the document (context) encoder."""
        self.rag.context_encoder_training = True
        self.rag.ctx_encoder = ctx_encoder

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        encoder_outputs=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        caches=None,
        context_input_ids=None,
        context_attention_mask=None,
        doc_scores=None,
        y_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        output_retrieved=None,
        exclude_bos_score=None,
        reduce_loss=None,
        labels=None,
        n_docs=None,
        **kw,  # needs kw for generation
    ):
        """Delegate to self.rag and, when labels are given, add the
        sequence-marginalized NLL to the returned output."""
        n_docs = n_docs if n_docs is not None else self.config.n_docs
        exclude_bos_score = (
            exclude_bos_score if exclude_bos_score is not None else self.config.exclude_bos_score
        )
        reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss
        if labels is not None:
            # labels double as decoder inputs; caching is disabled for training
            if decoder_input_ids is None:
                decoder_input_ids = labels
            y_cache = False
        outputs = self.rag(
            input_ids=input_ids,
            attention_mask=attention_mask,
            encoder_outputs=encoder_outputs,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            context_input_ids=context_input_ids,
            context_attention_mask=context_attention_mask,
            doc_scores=doc_scores,
            caches=caches,
            y_cache=y_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            output_retrieved=output_retrieved,
            n_docs=n_docs,
        )
        loss = None
        if labels is not None:
            loss = self.get_nll(
                outputs.logits,
                outputs.doc_scores,
                decoder_input_ids,
                reduce_loss=reduce_loss,
                epsilon=self.config.label_smoothing,
                exclude_bos_score=exclude_bos_score,
                n_docs=n_docs,
            )
        return RetrievAugLMMarginOutput(
            loss=loss,
            logits=outputs.logits,
            doc_scores=outputs.doc_scores,
            caches=outputs.caches,
            context_input_ids=outputs.context_input_ids,
            context_attention_mask=outputs.context_attention_mask,
            retrieved_doc_embeds=outputs.retrieved_doc_embeds,
            retrieved_doc_ids=outputs.retrieved_doc_ids,
            question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
            question_enc_hidden_states=outputs.question_enc_hidden_states,
            question_enc_attentions=outputs.question_enc_attentions,
            generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
            generator_enc_hidden_states=outputs.generator_enc_hidden_states,
            generator_enc_attentions=outputs.generator_enc_attentions,
            generator_dec_hidden_states=outputs.generator_dec_hidden_states,
            generator_dec_attentions=outputs.generator_dec_attentions,
            generator_cross_attentions=outputs.generator_cross_attentions,
        )

    @property
    def retriever(self):
        return self.rag.retriever

    @property
    def generator(self):
        return self.rag.generator

    @property
    def question_encoder(self):
        return self.rag.question_encoder

    @torch.no_grad()
    def generate(
        self,
        input_ids=None,
        attention_mask=None,
        context_input_ids=None,
        context_attention_mask=None,
        doc_scores=None,
        do_deduplication=None,  # defaults to True
        num_return_sequences=None,  # defaults to 1
        num_beams=None,  # defaults to 1
        n_docs=None,
        **model_kw,
    ):
        """Two-stage RAG-sequence decoding: generate candidate sequences per
        retrieved document, then rescore every candidate with the full model
        and keep the top num_return_sequences per batch element."""
        n_docs = n_docs if n_docs is not None else self.config.n_docs
        do_deduplication = (
            do_deduplication if do_deduplication is not None else self.config.do_deduplication
        )
        num_doc_return_sequences = (
            num_return_sequences
            if num_return_sequences is not None
            else self.config.num_return_sequences
        )
        num_beams = num_beams if num_beams is not None else self.config.num_beams
        assert input_ids is not None or context_input_ids is not None
        if self.retriever is not None and context_input_ids is None:
            # retrieve the document contexts from the raw query
            question_hidden_states = self.question_encoder(
                input_ids, attention_mask=attention_mask
            )[0]
            context_input_ids = self.retriever(
                input_ids,
                question_hidden_states.cpu().detach().to(torch.float32).numpy(),
                prefix=self.generator.config.prefix,
                n_docs=n_docs,
                return_tensors="pt",
            )["context_input_ids"]
            # set to correct device
            context_input_ids = context_input_ids.to(input_ids)
        hypos = []
        model_kw["num_beams"] = num_beams
        # ask the generator for all num_beams candidates per document; the
        # final top-num_doc_return_sequences cut happens after rescoring below
        model_kw["num_return_sequences"] = num_beams
        model_kw["attention_mask"] = None
        batch_size = (
            input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs
        )
        for index in range(batch_size):
            # first, generate beams from documents:
            generator_input_ids = context_input_ids[
                index * n_docs : (index + 1) * n_docs
            ]  # (n_docs, max_len)
            output_sequences = self.generator.generate(
                generator_input_ids,
                **model_kw,
            )  # n_docs * n_beam, tgt_len
            if do_deduplication:
                # do_deduplication, max_output_len — keyed on the token list's
                # string form so identical sequences collapse to one candidate
                output_sequences = torch.stack(
                    list({str(k.tolist()): k for k in output_sequences}.values())
                )
            num_candidates = output_sequences.shape[
                0
            ]  # after deduplication, this number can be less than n_docs*n_beam
            # then, run model forwards to get nll scores:
            if input_ids is not None:
                new_input_ids = input_ids[index : index + 1].repeat(num_candidates, 1)
                outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True)
            else:  # input_ids is None, need context_input_ids/mask and doc_scores
                assert context_attention_mask is not None
                assert doc_scores is not None
                individual_input_ids = generator_input_ids.repeat(
                    num_candidates, 1
                )  # (num_candidates*n_docs, max_len)
                individual_attention_mask = context_attention_mask[
                    index * n_docs : (index + 1) * n_docs
                ]
                individual_attention_mask = individual_attention_mask.repeat(num_candidates, 1)
                individual_doc_scores = doc_scores[
                    index : (index + 1), :
                ]  # doc_scores.shape = [batch, n_docs]
                individual_doc_scores = individual_doc_scores.repeat(
                    num_candidates, 1
                )  # [num_candidates, n_docs]
                outputs = self(
                    context_input_ids=individual_input_ids,
                    context_attention_mask=individual_attention_mask,
                    doc_scores=individual_doc_scores,
                    labels=output_sequences,
                    exclude_bos_score=True,
                )
            # lowest loss == highest score; keep the best candidates
            top_cand_inds = (-outputs["loss"]).topk(num_doc_return_sequences)[1]
            # add hypothesis
            hypos.append(output_sequences[top_cand_inds])
        return self._cat_and_pad(hypos, PAD=self.config.generator.PAD)

    def get_nll(
        self,
        seq_logits,
        doc_scores,
        target,
        reduce_loss=False,
        epsilon=0.0,
        exclude_bos_score=False,
        n_docs=None,
    ):
        """RAG-sequence NLL: per-document sequence log-likelihoods are combined
        with the document log-priors and logsumexp-marginalized over documents,
        with optional label smoothing (epsilon)."""
        # shift tokens left (predict token t+1 at position t; pad the tail)
        target = torch.cat(
            [
                target[:, 1:],
                target.new(target.shape[0], 1).fill_(self.config.generator.PAD),
            ],
            1,
        )
        n_docs = n_docs if n_docs is not None else self.config.n_docs
        # BOS is None for T5
        BOS = self.config.BOS or self.config.generator.BOS
        use_bos = BOS is not None and target[:, 0].eq(BOS).all()

        def _mask_pads(ll, smooth_obj):
            # zero out contributions at padding positions
            pad_mask = target.eq(self.config.generator.PAD)
            if pad_mask.any():
                ll.masked_fill_(pad_mask, 0.0)
                smooth_obj.masked_fill_(pad_mask, 0.0)
            return ll.squeeze(-1), smooth_obj.squeeze(-1)

        # seq_logits dim = (batch*n_docs, tgt_len , #vocabs)
        seq_logprobs = F.log_softmax(seq_logits, dim=-1).view(
            seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1)
        )  # batch_size x n_docs x tgt_len x #s_vocab
        doc_logprobs = F.log_softmax(doc_scores, dim=1).unsqueeze(-1).unsqueeze(-1)
        # RAG-sequence marginalization: the doc log-prior is added once per
        # sequence, folded into the second token's scores (the first token is
        # BOS and may be excluded from scoring below)
        first_token_scores = seq_logprobs[:, :, :1, :]
        second_token_scores = seq_logprobs[:, :, 1:2, :]
        remainder = seq_logprobs[:, :, 2:, :]
        rag_logprobs = torch.cat(
            [first_token_scores, second_token_scores + doc_logprobs, remainder], dim=2
        )
        # calculate loss
        target = target.unsqueeze(1).unsqueeze(-1).repeat(1, n_docs, 1, 1)
        assert target.dim() == rag_logprobs.dim()
        ll = rag_logprobs.gather(dim=-1, index=target)
        smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True)  # total sum of all (normalised) logits
        ll, smooth_obj = _mask_pads(ll, smooth_obj)
        # sum over tokens, exclude bos while scoring
        ll = ll[:, :, 1:].sum(2) if exclude_bos_score and use_bos else ll.sum(2)
        smooth_obj = smooth_obj.sum(2)
        ll = ll.logsumexp(1)  # logsumexp over docs
        smooth_obj = smooth_obj.logsumexp(1)
        nll_loss = -ll
        smooth_loss = -smooth_obj
        if reduce_loss:
            nll_loss = nll_loss.sum()
            smooth_loss = smooth_loss.sum()
        eps_i = epsilon / rag_logprobs.size(-1)
        loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
        return loss

    @staticmethod
    def _cat_and_pad(tensors, PAD):
        """Concatenate variable-length hypothesis batches along dim 0, right-
        padding each to the longest sequence with PAD."""
        output = (
            tensors[0]
            .new(sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors]))
            .fill_(PAD)
        )
        ind = 0
        for t in tensors:
            output[ind : ind + t.shape[0], : t.shape[1]] = t
            ind += t.shape[0]
        return output
class RagTokenForGeneration(PreTrained):
    """RAG-token model: marginalizes over retrieved documents per *token*.

    Wraps the base RAG Model and adds per-token marginalization
    (marginalize), the corresponding NLL (get_nll), and a generate() that
    pre-computes the encoder pass over all retrieved contexts and then runs
    greedy or beam search on the marginalized token distribution.
    """

    def __init__(
        self,
        config=None,
        question_encoder=None,
        generator=None,
        retriever=None,
        **kw,
    ):
        # Same contract as Model: either a config, or both sub-models from
        # which a config can be derived.
        assert config is not None or (question_encoder is not None and generator is not None)
        if config is None:
            config = RagConfig.from_question_encoder_generator_configs(
                question_encoder.config, generator.config, **kw
            )
        super().__init__(config)
        self.rag = Model(
            config=config,
            question_encoder=question_encoder,
            generator=generator,
            retriever=retriever,
        )

    def marginalize(self, seq_logits, doc_scores, n_docs=None):
        """Combine per-document token log-probs with the document log-priors
        and logsumexp over the document dimension (per token)."""
        n_docs = n_docs if n_docs is not None else self.config.n_docs
        # RAG-token marginalization
        seq_logprobs = F.log_softmax(seq_logits, dim=-1).view(
            seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1)
        )
        doc_logprobs = torch.log_softmax(doc_scores, dim=1)
        log_prob_sum = seq_logprobs + doc_logprobs.unsqueeze(-1).unsqueeze(-1)
        return torch.logsumexp(log_prob_sum, dim=1)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        encoder_outputs=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        caches=None,
        context_input_ids=None,
        context_attention_mask=None,
        doc_scores=None,
        y_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        output_retrieved=None,
        do_marginalize=None,
        reduce_loss=None,
        labels=None,
        n_docs=None,
        **kw,  # needs kw for generation
    ):
        """Delegate to self.rag; optionally marginalize the logits over docs
        and, when labels are given, add the token-marginalized NLL."""
        n_docs = n_docs if n_docs is not None else self.config.n_docs
        do_marginalize = (
            do_marginalize if do_marginalize is not None else self.config.do_marginalize
        )
        reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss
        if labels is not None:
            # labels double as decoder inputs; caching is disabled for training
            if decoder_input_ids is None:
                decoder_input_ids = labels
            y_cache = False
        outputs = self.rag(
            input_ids=input_ids,
            attention_mask=attention_mask,
            encoder_outputs=encoder_outputs,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            context_input_ids=context_input_ids,
            context_attention_mask=context_attention_mask,
            doc_scores=doc_scores,
            caches=caches,
            y_cache=y_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            output_retrieved=output_retrieved,
            n_docs=n_docs,
        )
        loss = None
        logits = outputs.logits
        if labels is not None:
            assert decoder_input_ids is not None
            loss = self.get_nll(
                outputs.logits,
                outputs.doc_scores,
                labels,
                reduce_loss=reduce_loss,
                epsilon=self.config.label_smoothing,
                n_docs=n_docs,
            )
        if do_marginalize:
            logits = self.marginalize(logits, outputs.doc_scores, n_docs)
        return RetrievAugLMMarginOutput(
            loss=loss,
            logits=logits,
            doc_scores=outputs.doc_scores,
            caches=outputs.caches,
            context_input_ids=outputs.context_input_ids,
            context_attention_mask=outputs.context_attention_mask,
            retrieved_doc_embeds=outputs.retrieved_doc_embeds,
            retrieved_doc_ids=outputs.retrieved_doc_ids,
            question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
            question_enc_hidden_states=outputs.question_enc_hidden_states,
            question_enc_attentions=outputs.question_enc_attentions,
            generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
            generator_enc_hidden_states=outputs.generator_enc_hidden_states,
            generator_enc_attentions=outputs.generator_enc_attentions,
            generator_dec_hidden_states=outputs.generator_dec_hidden_states,
            generator_dec_attentions=outputs.generator_dec_attentions,
            generator_cross_attentions=outputs.generator_cross_attentions,
        )

    @torch.no_grad()
    def generate(
        self,
        input_ids=None,
        attention_mask=None,
        context_input_ids=None,
        context_attention_mask=None,
        doc_scores=None,
        max_length=None,
        min_length=None,
        early_stopping=None,
        y_cache=None,
        num_beams=None,
        num_beam_groups=None,
        diversity_penalty=None,
        BOS=None,
        PAD=None,
        EOS=None,
        length_penalty=None,
        no_repeat_ngram_size=None,
        encoder_no_repeat_ngram_size=None,
        repetition_penalty=None,
        bad_words_ids=None,
        num_return_sequences=None,
        decoder_start_token_id=None,
        n_docs=None,
        prefix_allowed_tokens_fn=None,
        # NOTE(review): mutable default arguments below are shared across
        # calls; they are only passed through to _get_logits_processor here,
        # but None defaults would be safer — confirm they are never mutated.
        logits_processor=LogitsProcessorList(),
        renormalize_logits=None,
        stopping_criteria=StoppingCriteriaList(),
        forced_bos_token_id=None,
        forced_eos_token_id=None,
        remove_invalid_values=None,
        exponential_decay_length_penalty=None,
        **model_kw,
    ):
        """RAG-token decoding: retrieve (if needed), run the generator encoder
        once over all (query, doc) contexts, expand them to the beam width,
        then greedy- or beam-search over the doc-marginalized distribution."""
        # Resolve every unset option from the config.
        n_docs = n_docs if n_docs is not None else self.config.n_docs
        num_beams = num_beams if num_beams is not None else self.config.num_beams
        num_beam_groups = (
            num_beam_groups if num_beam_groups is not None else self.config.num_beam_groups
        )
        max_length = max_length if max_length is not None else self.config.max_length
        num_return_sequences = (
            num_return_sequences
            if num_return_sequences is not None
            else self.config.num_return_sequences
        )
        BOS = BOS if BOS is not None else self.config.generator.BOS
        EOS = EOS if EOS is not None else self.config.generator.EOS
        PAD = PAD if PAD is not None else self.config.generator.PAD
        y_cache = y_cache if y_cache is not None else self.config.y_cache
        decoder_start_token_id = (
            decoder_start_token_id
            if decoder_start_token_id is not None
            else self.config.generator.decoder_start_token_id
        )
        remove_invalid_values = (
            remove_invalid_values
            if remove_invalid_values is not None
            else self.config.remove_invalid_values
        )
        exponential_decay_length_penalty = (
            exponential_decay_length_penalty
            if exponential_decay_length_penalty is not None
            else self.config.exponential_decay_length_penalty
        )
        # retrieve docs
        if self.retriever is not None and context_input_ids is None:
            question_hidden_states = self.question_encoder(
                input_ids, attention_mask=attention_mask
            )[0]
            # the retriever consumes CPU float32 numpy arrays
            out = self.retriever(
                input_ids,
                question_hidden_states.cpu().detach().to(torch.float32).numpy(),
                prefix=self.generator.config.prefix,
                n_docs=n_docs,
                return_tensors="pt",
            )
            context_input_ids, context_attention_mask, retrieved_doc_embeds = (
                out["context_input_ids"],
                out["context_attention_mask"],
                out["retrieved_doc_embeds"],
            )
            # set to correct device
            retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states)
            context_input_ids = context_input_ids.to(input_ids)
            context_attention_mask = context_attention_mask.to(input_ids)
            # compute doc_scores
            doc_scores = torch.bmm(
                question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)
            ).squeeze(1)
        assert (context_input_ids.shape[0] % n_docs) == 0
        # batch_size
        batch_size = context_input_ids.shape[0] // n_docs
        # pre-compute the encoder pass over all (query, doc) contexts once
        encoder = self.rag.generator.get_encoder()
        encoder_outputs = encoder(
            input_ids=context_input_ids, attention_mask=context_attention_mask, return_dict=True
        )
        # fresh decoder inputs: one start token per beam
        input_ids = torch.full(
            (batch_size * num_beams, 1),
            decoder_start_token_id,
            dtype=torch.long,
            device=next(self.parameters()).device,
        )
        input_ids_seq_length = input_ids.shape[-1]
        y = encoder_outputs["y"]

        def extend_enc_output(tensor, num_beams=None):
            # split into `batch_size`, `num_beams`, `num_docs`
            tensor = tensor[None, None, :].reshape((batch_size, 1, n_docs) + tensor.shape[1:])
            # repeat same last hidden states over `num_beams` dimension
            tensor = tensor.expand((batch_size, num_beams, n_docs) + tensor.shape[3:])
            # merge `batch_size`, `num_beams`, `num_docs` dims again
            return tensor.reshape((batch_size * num_beams * n_docs,) + tensor.shape[3:])

        # correctly extend y and attention mask
        context_attention_mask = extend_enc_output(context_attention_mask, num_beams=num_beams)
        encoder_outputs["y"] = extend_enc_output(y, num_beams=num_beams)
        doc_scores = doc_scores.repeat_interleave(num_beams, dim=0)
        # define start_len & additional parameters
        model_kw["doc_scores"] = doc_scores
        model_kw["encoder_outputs"] = encoder_outputs
        model_kw["attention_mask"] = context_attention_mask
        model_kw["n_docs"] = n_docs
        pre_processor = self._get_logits_processor(
            repetition_penalty=repetition_penalty,
            no_repeat_ngram_size=no_repeat_ngram_size,
            encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size,
            input_ids_seq_length=input_ids_seq_length,
            encoder_input_ids=context_input_ids,
            bad_words_ids=bad_words_ids,
            min_length=min_length,
            max_length=max_length,
            EOS=EOS,
            forced_bos_token_id=forced_bos_token_id,
            forced_eos_token_id=forced_eos_token_id,
            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
            num_beams=num_beams,
            num_beam_groups=num_beam_groups,
            diversity_penalty=diversity_penalty,
            remove_invalid_values=remove_invalid_values,
            exponential_decay_length_penalty=exponential_decay_length_penalty,
            logits_processor=logits_processor,
            renormalize_logits=renormalize_logits,
        )
        if num_beams == 1:
            if num_return_sequences > 1:
                raise ValueError(
                    f"num_return_sequences has to be 1, but is {num_return_sequences} when doing greedy search."
                )
            return self.greedy_search(
                input_ids,
                logits_processor=pre_processor,
                max_length=max_length,
                PAD=PAD,
                EOS=EOS,
                **model_kw,
            )
        elif num_beams > 1:
            length_penalty = (
                length_penalty if length_penalty is not None else self.config.length_penalty
            )
            early_stopping = (
                early_stopping if early_stopping is not None else self.config.early_stopping
            )
            if num_return_sequences > num_beams:
                raise ValueError(
                    "`num_return_sequences` has to be smaller or equal to `num_beams`."
                )
            beam_scorer = BeamSearchScorer(
                batch_size=batch_size,
                num_beams=num_beams,
                device=self.device,
                length_penalty=length_penalty,
                do_early_stopping=early_stopping,
                num_beam_hyps_to_keep=num_return_sequences,
            )
            return self.beam_search(
                input_ids,
                beam_scorer,
                logits_processor=pre_processor,
                max_length=max_length,
                PAD=PAD,
                EOS=EOS,
                **model_kw,
            )
        else:
            raise ValueError(
                f"`num_beams` has to be an integer strictly superior to 0 (≥ 1), but is {num_beams}"
            )

    def shift_tokens_right(self, input_ids, start_token_id=None):
        """Prepend start_token_id and drop the last token (standard seq2seq
        decoder-input shift)."""
        if start_token_id is None:
            start_token_id = self.config.decoder_start_token_id
        shifted_input_ids = input_ids.new_zeros(input_ids.shape)
        shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
        shifted_input_ids[:, 0] = start_token_id
        return shifted_input_ids

    def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None):
        """RAG-token NLL: per-token doc-marginalized log-probs (via
        marginalize) gathered at the targets, with optional label smoothing."""
        n_docs = n_docs if n_docs is not None else self.config.n_docs
        # shift tokens left (predict token t+1 at position t; pad the tail)
        target = torch.cat(
            [
                target[:, 1:],
                target.new(target.shape[0], 1).fill_(self.config.generator.PAD),
            ],
            1,
        )

        def _mask_pads(ll, smooth_obj):
            # zero out contributions at padding positions
            pad_mask = target.eq(self.config.generator.PAD)
            if pad_mask.any():
                ll.masked_fill_(pad_mask, 0.0)
                smooth_obj.masked_fill_(pad_mask, 0.0)
            return ll.squeeze(-1), smooth_obj.squeeze(-1)

        rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs)
        target = target.unsqueeze(-1)
        assert target.dim() == rag_logprobs.dim()
        ll = rag_logprobs.gather(dim=-1, index=target)
        smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True)  # total sum of all (normalised) logits
        ll, smooth_obj = _mask_pads(ll, smooth_obj)
        ll = ll.sum(1)  # sum over tokens
        smooth_obj = smooth_obj.sum(1)
        nll_loss = -ll
        smooth_loss = -smooth_obj
        if reduce_loss:
            nll_loss = nll_loss.sum()
            smooth_loss = smooth_loss.sum()
        eps_i = epsilon / rag_logprobs.size(-1)
        loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
        return loss
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,554
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/convert/albert.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import numpy as np
import re
import tensorflow as tf
import torch
from argparse import ArgumentParser
from os.path import abspath
from transformers.utils import logging
from ..config.albert import PreTrained
from ...models.albert import ForPreTraining
logging.set_verbosity_info()
log = logging.get_logger(__name__)
# Checkpoint variable-name components that belong to optimizer state or
# training bookkeeping rather than model weights; any variable whose path
# contains one of these is skipped during conversion.
_SKIP = [
    "adam_v",
    "adam_m",
    "AdamWeightDecayOptimizer",
    "AdamWeightDecayOptimizer_1",
    "global_step",
]
def load_src_weights(model, src_path):
    """Copy weights from a TF ALBERT checkpoint at `src_path` into `model`.

    Each TF variable name is rewritten into the PyTorch attribute path via a
    chain of string substitutions, then walked attribute-by-attribute from
    `model` to locate the destination parameter. Returns the mutated `model`.
    """
    src_path = abspath(src_path)
    log.info(f"Loading from: {src_path}")
    xs = tf.train.list_variables(src_path)
    assert len(xs) > 0
    ns, ws = _load_weights(xs, src_path)
    # NOTE(review): if `ws` is a mapping, zip(ns, ws) pairs each name with a
    # KEY of `ws` (a string), not an array — this loop relies on
    # `_load_weights` returning a sequence of arrays aligned with `ns`;
    # verify.
    for n, w in zip(ns, ws):
        # Normalize the TF variable name into the PyTorch attribute path.
        n = n.replace("module/", "")
        n = n.replace("ffn_1", "ffn")
        n = n.replace("bert/", "albert/")
        n = n.replace("attention_1", "attention")
        n = n.replace("transform/", "")
        n = n.replace("LayerNorm_1", "full_layer_layer_norm")
        n = n.replace("LayerNorm", "attention/LayerNorm")
        n = n.replace("transformer/", "")
        n = n.replace("intermediate/dense/", "")
        n = n.replace("ffn/intermediate/output/dense/", "ffn_output/")
        n = n.replace("/output/", "/")
        n = n.replace("/self/", "/")
        n = n.replace("pooler/dense", "pooler")
        n = n.replace("cls/predictions", "predictions")
        n = n.replace("predictions/attention", "predictions")
        n = n.replace("embeddings/attention", "embeddings")
        n = n.replace("inner_group_", "albert_layers/")
        n = n.replace("group_", "albert_layer_groups/")
        # Bare classifier heads live under `classifier/` on the PyTorch side.
        if len(n.split("/")) == 1 and ("output_bias" in n or "output_weights" in n):
            n = "classifier/" + n
        if "seq_relationship" in n:
            n = n.replace("seq_relationship/output_", "sop_classifier/classifier/")
            n = n.replace("weights", "weight")
        ss = n.split("/")
        # Optimizer/bookkeeping variables carry no model weights.
        if any(s in _SKIP for s in ss):
            log.info(f"Skipping {'/'.join(ss)}")
            continue
        # Walk the attribute path segment by segment, starting at the model.
        p = model
        for s in ss:
            # Segments like "layer_3" split into a name and an index.
            if re.fullmatch(r"[A-Za-z]+_\d+", s):
                scopes = re.split(r"_(\d+)", s)
            else:
                scopes = [s]
            if scopes[0] == "kernel" or scopes[0] == "gamma":
                p = getattr(p, "weight")
            elif scopes[0] == "output_bias" or scopes[0] == "beta":
                p = getattr(p, "bias")
            elif scopes[0] == "output_weights":
                p = getattr(p, "weight")
            elif scopes[0] == "squad":
                p = getattr(p, "classifier")
            else:
                try:
                    p = getattr(p, scopes[0])
                except AttributeError:
                    # NOTE(review): this `continue` skips only the current
                    # path SEGMENT (inner loop), not the whole weight —
                    # subsequent segments are resolved against the unchanged
                    # `p`; confirm this is the intended behavior.
                    log.info(f"Skipping {'/'.join(ss)}")
                    continue
            # A trailing index (e.g. "layer_3") selects into a module list.
            if len(scopes) >= 2:
                p = p[int(scopes[1])]
        # `s` is the last path segment from the loop above.
        if s[-11:] == "_embeddings":
            p = getattr(p, "weight")
        elif s == "kernel":
            # TF stores dense kernels transposed relative to torch Linear.
            w = np.transpose(w)
        assert p.shape == w.shape
        p.data = torch.from_numpy(w)
    return model
def _load_weights(xs, src_path):
    """Load the TF checkpoint variables listed in `xs` from `src_path`.

    Returns a pair `(ns, ws)` where `ns` is the list of variable names and
    `ws` is a list of numpy arrays aligned index-by-index with `ns`.

    Fix: the previous version accumulated `ws` as a dict keyed by name. The
    caller iterates `zip(ns, ws)`, and zipping a dict yields its KEYS, so
    each "weight" `w` was the variable's name string instead of its array
    (breaking `np.transpose(w)` and the shape assert). Returning a list
    keeps `zip(ns, ws)` correctly paired.
    """
    ns = []
    ws = []
    for n, shape in xs:
        log.info(f"Loading TF weight {n} with shape {shape}")
        ns.append(n)
        ws.append(tf.train.load_variable(src_path, n))
    return ns, ws
def to_pytorch(src_path, cfg_path, save_path):
cfg = PreTrained.from_json_file(cfg_path)
print(f"Building from config: {cfg}")
m = ForPreTraining(cfg)
load_src_weights(m, src_path)
print(f"Saving to: {save_path}")
torch.save(m.state_dict(), save_path)
if __name__ == "__main__":
x = ArgumentParser()
x.add_argument("--src_path", default=None, type=str, required=True)
x.add_argument("--cfg_path", default=None, type=str, required=True)
x.add_argument("--save_path", default=None, type=str, required=True)
y = x.parse_args()
to_pytorch(y.src_path, y.cfg_path, y.save_path)
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,555
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/run/gen.py
|
# Copyright 2021 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# conditional text generation with (GPT/GPT-2/CTRL/Transformer-XL/XLNet)
import argparse
import logging
import numpy as np
import torch
from transformers import (
CTRLLMHeadModel,
CTRLTokenizer,
GPT2LMHeadModel,
GPT2Tokenizer,
OpenAIGPTLMHeadModel,
OpenAIGPTTokenizer,
TransfoXLLMHeadModel,
TransfoXLTokenizer,
XLMTokenizer,
XLMWithLMHeadModel,
XLNetLMHeadModel,
XLNetTokenizer,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
log = logging.getLogger(__name__)
MAX_LENGTH = int(10000)
MODEL_CLASSES = {
"gpt2": (GPT2LMHeadModel, GPT2Tokenizer),
"ctrl": (CTRLLMHeadModel, CTRLTokenizer),
"openai-gpt": (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
"xlnet": (XLNetLMHeadModel, XLNetTokenizer),
"transfo-xl": (TransfoXLLMHeadModel, TransfoXLTokenizer),
"xlm": (XLMWithLMHeadModel, XLMTokenizer),
}
PREFIX = """In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
def adjust_length_to_model(x, lim):
if x < 0 and lim > 0:
x = lim
elif 0 < lim < x:
x = lim
elif x < 0:
x = MAX_LENGTH
return x
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model_type", default=None, type=str, required=True)
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--stop_token", type=str, default=None)
parser.add_argument("--temperature", type=float, default=1.0)
parser.add_argument("--repetition_penalty", type=float, default=1.0)
parser.add_argument("--k", type=int, default=0)
parser.add_argument("--p", type=float, default=0.9)
parser.add_argument("--prefix", type=str, default="")
parser.add_argument("--padding_text", type=str, default="")
parser.add_argument("--xlm_language", type=str, default="")
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--no_cuda", action="store_true")
parser.add_argument("--num_return_sequences", type=int, default=1)
parser.add_argument("--fp16", action="store_true")
ps = parser.parse_args()
ps.device = torch.device("cuda" if torch.cuda.is_available() and not ps.no_cuda else "cpu")
ps.n_gpu = 0 if ps.no_cuda else torch.cuda.device_count()
log.warning(f"device: {ps.device}, n_gpu: {ps.n_gpu}, 16-bits training: {ps.fp16}")
def set_seed(ps):
np.random.seed(ps.seed)
torch.manual_seed(ps.seed)
if ps.n_gpu > 0:
torch.cuda.manual_seed_all(ps.seed)
def prepare_ctrl_input(ps, _, tokenizer, prompt):
if ps.temperature > 0.7:
log.info("CTRL typically works better with lower temperatures (and lower top_k).")
y = tokenizer.encode(prompt, add_special_tokens=False)
if not any(y[0] == x for x in tokenizer.control_codes.values()):
log.info(
"WARNING! You are not starting your generation from a control code so you won't get good results"
)
return prompt
def prepare_xlm_input(ps, model, tokenizer, prompt):
# kw = {"language": None, "MSK_TOK": None}
use_lang_emb = hasattr(model.config, "use_lang_emb") and model.config.use_lang_emb
if hasattr(model.config, "lang2id") and use_lang_emb:
ls = model.config.lang2id.keys()
if ps.xlm_language in ls:
l = ps.xlm_language
else:
l = None
while l not in ls:
l = input("Using XLM. Select language in " + str(list(ls)) + " >>> ")
model.config.LANG = model.config.lang2id[l]
# kw["language"] = tokenizer.lang2id[l]
return prompt
def prepare_xlnet_input(ps, _, tokenizer, prompt):
x = ps.prefix if ps.prefix else ps.padding_text if ps.padding_text else PREFIX
prompt = x + prompt
return prompt
def prepare_transfoxl_input(ps, _, tokenizer, prompt):
x = ps.prefix if ps.prefix else ps.padding_text if ps.padding_text else PREFIX
prompt = x + prompt
return prompt
PREPROCESSING_FUNCTIONS = {
"ctrl": prepare_ctrl_input,
"xlm": prepare_xlm_input,
"xlnet": prepare_xlnet_input,
"transfo-xl": prepare_transfoxl_input,
}
set_seed(ps)
try:
ps.model_type = ps.model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[ps.model_type]
except KeyError:
raise KeyError("Model {} is not supported")
tokenizer = tokenizer_class.from_pretrained(ps.model_name)
model = model_class.from_pretrained(ps.model_name)
model.to(ps.device)
if ps.fp16:
model.half()
ps.length = adjust_length_to_model(ps.length, lim=model.config.n_pos)
log.info(ps)
x = ps.prompt if ps.prompt else input("Model prompt >>> ")
if ps.model_type in PREPROCESSING_FUNCTIONS.keys():
prep = PREPROCESSING_FUNCTIONS.get(ps.model_type)
y = prep(ps, model, tokenizer, x)
if model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kws = {"add_space_before_punct_symbol": True}
else:
kws = {}
prompt = tokenizer.encode(y, add_special_tokens=False, return_tensors="pt", **kws)
else:
prefix = ps.prefix if ps.prefix else ps.padding_text
prompt = tokenizer.encode(prefix + x, add_special_tokens=False, return_tensors="pt")
prompt = prompt.to(ps.device)
if prompt.size()[-1] == 0:
ins = None
else:
ins = prompt
out = model.generate(
input_ids=ins,
max_len=ps.length + len(prompt[0]),
temperature=ps.temperature,
top_k=ps.k,
top_p=ps.p,
repetition_penalty=ps.repetition_penalty,
do_sample=True,
num_return_sequences=ps.num_return_sequences,
)
if len(out.shape) > 2:
out.squeeze_()
ys = []
for i, x in enumerate(out):
print(f"=== GENERATED SEQUENCE {i + 1} ===")
x = x.tolist()
y = tokenizer.decode(x, clean_up_tokenization_spaces=True)
y = y[: y.find(ps.stop_token) if ps.stop_token else None]
y = x + y[len(tokenizer.decode(prompt[0], clean_up_tokenization_spaces=True)) :]
ys.append(y)
print(y)
return ys
if __name__ == "__main__":
main()
"""
python gen.py \
--model_type=gpt2 \
--model_name=gpt2
"""
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,556
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/run/image.py
|
from dataclasses import dataclass, field
from typing import Optional

import datasets
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
    CenterCrop,
    Compose,
    Normalize,
    RandomHorizontalFlip,
    RandomResizedCrop,
    Resize,
    ToTensor,
)
from transformers import (
    MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForImageClassification,
    Trainer,
)
# Config classes registered for image-classification heads in transformers.
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
# Their short model-type identifiers; presumably used elsewhere to validate a
# --model_type CLI choice — not referenced in the code visible here, confirm.
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path):
    """Open the image file at *path* and return it as an RGB PIL image.

    The file is read inside a context manager so the handle is closed even
    if decoding fails; ``convert`` forces the pixel data to be loaded before
    the file is closed.
    """
    with open(path, "rb") as fp:
        return Image.open(fp).convert("RGB")
add_argument("--dataset_name", type=str, default="nateraw/image-folder")
@dataclass
class DataTrainingArguments:
    """Arguments selecting the image folders and sample limits for a run.

    After construction, ``data_files`` maps ``"train"``/``"val"`` to the
    directories that were actually provided, or is ``None`` when neither
    directory was given.
    """

    # NOTE: the original fields carried no type annotations; @dataclass raises
    # "TypeError: '<name>' is a field but has no type annotation" for an
    # un-annotated field(), so the class could not even be defined. Adding the
    # annotations below is the fix; names, defaults and metadata are unchanged.
    train_dir: Optional[str] = field(
        default=None, metadata={"help": "A folder containing the training data."}
    )
    validation_dir: Optional[str] = field(
        default=None, metadata={"help": "A folder containing the validation data."}
    )
    train_val_split: float = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        },
    )

    def __post_init__(self):
        # Keep only the directories the user actually supplied.
        data_files = dict()
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
add_argument("--model_name", type=str, default="google/vit-base-patch16-224-in21k", required=True)
def collate_fn(examples):
    """Collate a list of per-example dicts into a single training batch.

    Args:
        examples: sequence of dicts, each holding a "pixel_values" tensor and
            an integer "labels" entry.

    Returns:
        dict with "pixel_values" stacked along a new leading dimension and
        "labels" as a 1-D tensor.
    """
    images = [sample["pixel_values"] for sample in examples]
    targets = [sample["labels"] for sample in examples]
    return {
        "pixel_values": torch.stack(images),
        "labels": torch.tensor(targets),
    }
def main():
    """Fine-tune an image-classification model with the HF Trainer.

    Pipeline: load the dataset, derive label mappings, build config/model/
    feature-extractor, attach torchvision transforms, then train/evaluate and
    write a model card or push to the hub.

    NOTE(review): `data_args`, `model_args`, `training_args` and
    `last_checkpoint` are read as module-level names but are not defined
    anywhere in this file as shown — presumably produced by an argument
    parser elsewhere; confirm before running.
    """
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        task="image-classification",
    )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = ds["train"].features["labels"].names
    label2id, id2label = dict(), dict()
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    # Load the accuracy metric from the datasets package
    metric = datasets.load_metric("accuracy")
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        """Computes accuracy on a batch of predictions"""
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)
    # Build the model config; label maps above feed the Inference API labels.
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name,
        n_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetune="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_version,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name,
        from_tf=bool(".ckpt" in model_args.model_name),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_version,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # The feature extractor supplies normalization stats and the target size.
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor or model_args.model_name,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_version,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Define torchvision transforms to be applied to each image.
    normalize = Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
    # Training: random crop + flip for augmentation; validation: deterministic
    # resize + center crop so eval results are reproducible.
    _train_transforms = Compose(
        [
            RandomResizedCrop(feature_extractor.size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(feature_extractor.size),
            CenterCrop(feature_extractor.size),
            ToTensor(),
            normalize,
        ]
    )
    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch
    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [
            _val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        # Optionally truncate the train split for debugging/quick runs.
        if data_args.max_train_samples is not None:
            ds["train"] = (
                ds["train"]
                .shuffle(seed=training_args.seed)
                .select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        ds["train"].set_transform(train_transforms)
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        # Optionally truncate the validation split as well.
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"]
                .shuffle(seed=training_args.seed)
                .select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        # Prefer an explicit --resume_from_checkpoint; otherwise fall back to
        # `last_checkpoint` (defined outside this view — see docstring note).
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kw = {
        "finetuned_from": model_args.model_name,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kw)
    else:
        trainer.create_model_card(**kw)
# Script entry point: run the fine-tuning pipeline only when executed directly.
if __name__ == "__main__":
    main()
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,557
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/convert/decision_transfo.py
|
# Adapted from transformers.models.gpt2.modeling_gpt2.load_tf_weights_in_gpt2
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
    """Copy every variable of a TF GPT-2 checkpoint into *model* in place.

    Walks each TF variable name (minus the leading ``model/`` scope) down the
    PyTorch module tree, mapping TF scope names to attribute names, then
    assigns the numpy array as the parameter data. Returns *model*.
    """
    tf_path = os.path.abspath(gpt2_checkpoint_path)
    log.info(f"Converting TensorFlow checkpoint from {tf_path}")
    weights = []
    for name, shape in tf.train.list_variables(tf_path):
        log.info(f"Loading TF weight {name} with shape {shape}")
        # squeeze() drops the singleton leading dim TF stores for some vars
        weights.append((name, tf.train.load_variable(tf_path, name).squeeze()))
    for name, array in weights:
        parts = name[6:].split("/")  # strip the leading "model/" scope
        pointer = model
        for part in parts:
            # "h0" style names split into ["h", "0", ""] -> attr + index
            if re.fullmatch(r"[A-Za-z]+\d+", part):
                scopes = re.split(r"(\d+)", part)
            else:
                scopes = [part]
            head = scopes[0]
            if head in ("w", "g"):
                pointer = getattr(pointer, "weight")
            elif head == "b":
                pointer = getattr(pointer, "bias")
            elif head in ("wpe", "wte"):
                pointer = getattr(getattr(pointer, head), "weight")
            else:
                pointer = getattr(pointer, head)
            if len(scopes) >= 2:
                pointer = pointer[int(scopes[1])]
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            # Attach the shapes so the failure is diagnosable from the traceback.
            e.args += (pointer.shape, array.shape)
            raise
        log.info(f"Initialize PyTorch weight {parts}")
        pointer.data = torch.from_numpy(array)
    return model
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
33,558
|
quantapix/qnarre
|
refs/heads/main
|
/qnarre/prep/tokens/fast/gpt2.py
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import json
from tokenizers import pre_tokenizers
from ....tokens.fast import PreTrainedTokenizerFast
from ..gpt2 import Tokenizer as GPT2
# Canonical file names for the serialized GPT-2 tokenizer assets.
VOCAB_FS = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_file": "tokenizer.json",
}

# Checkpoints that share the byte-level BPE tokenizer, in release order.
_MODELS = ("gpt2", "gpt2-medium", "gpt2-large", "gpt2-xl", "distilgpt2")

# Hub download URL for each asset of each checkpoint; every entry follows
# the same https://huggingface.co/<model>/resolve/main/<file> scheme.
VOCAB_MAP = {
    kind: {m: f"https://huggingface.co/{m}/resolve/main/{fname}" for m in _MODELS}
    for kind, fname in VOCAB_FS.items()
}

# All GPT-2 variants share a 1024-token context window.
INPUT_CAPS = dict.fromkeys(_MODELS, 1024)
class Tokenizer(PreTrainedTokenizerFast):
    """Fast (Rust-backed) GPT-2 byte-level BPE tokenizer.

    Wraps the `tokenizers` backend and exposes the project-standard class
    attributes (vocab maps, input caps, slow-class fallback).
    """

    vocab_fs = VOCAB_FS
    vocab_map = VOCAB_MAP
    input_caps = INPUT_CAPS
    model_input_names = ["input_ids", "mask"]
    slow_tokenizer_class = GPT2

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk="<|endoftext|>",
        bos="<|endoftext|>",
        eos="<|endoftext|>",
        add_prefix_space=False,
        **kw,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk=unk,
            bos=bos,
            eos=eos,
            add_prefix_space=add_prefix_space,
            **kw,
        )
        # The serialized backend may have been saved with a different
        # `add_prefix_space`; if so, rebuild the pre-tokenizer to honor the
        # value requested here.
        state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            kind = getattr(pre_tokenizers, state.pop("type"))
            state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = kind(**state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kw):
        # Pre-tokenized input only works when a prefix space is added.
        split = kw.get("is_split_into_words", False)
        assert self.add_prefix_space or not split
        return super()._batch_encode_plus(*args, **kw)

    def _encode_plus(self, *args, **kw):
        # Same constraint as the batched path.
        split = kw.get("is_split_into_words", False)
        assert self.add_prefix_space or not split
        return super()._encode_plus(*args, **kw)

    def save_vocabulary(self, dir, pre=None):
        # Delegate to the backend model; it returns the written file paths.
        saved = self._tokenizer.model.save(dir, name=pre)
        return tuple(saved)

    def _build_conversation_input_ids(self, conversation):
        # Concatenate every turn, terminating each with EOS, then keep only
        # the most recent `model_max_length` tokens.
        ids = []
        for _, text in conversation.iter_texts():
            ids.extend(self.encode(text, add_special_tokens=False) + [self.EOS])
        if len(ids) > self.model_max_length:
            ids = ids[-self.model_max_length :]
        return ids
|
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.