index
int64
repo_name
string
branch_name
string
path
string
content
string
import_graph
string
33,341
lilywise96/ChoResearch
refs/heads/master
/tree_modification.py
""" Name: tree_modification.py Author: Lily Wise All functions that modify or determine information about the annotation or ontology trees. """ from math import log10 # Calculates the specificity of every node in the tree using the information content. # # param: tree - dictionary; key: term, value: all parents (transitively) # param: tree_ic - dictionary; key: term, value: information content of that term (-log(P)) # returns: term_spec - dictionary, key: term, value: specificity # (sum of info content of ancestors divided by the number of tree terms) def all_specificity(tree, tree_ic): term_spec = {} all_nodes = set() for node in tree: term_spec[node] = 0 all_nodes.add(node) for parent in tree[node]: if parent in tree_ic: term_spec[node] += tree_ic[parent] all_nodes.add(parent) for term in term_spec: term_spec[term] /= len(all_nodes) return term_spec # Swaps a dictionary that has a key and a value where the value is a list # or a set. # # param: key_to_value - a dictionary; key: key, value: set or list of values # return: value_to_key - a dictionary; key: previous value, value: set or list of previous keys def swap_key_value(key_to_value): value_to_key = {} for key in key_to_value: for value in key_to_value[key]: if value not in value_to_key: value_to_key[value] = set() value_to_key[value].add(key) return value_to_key # Joins all trees and switches from term to genes into gene to terms. 
# # param: hp_tg - the human phenotype tree, term to genes # param: bp_tg - the biological process tree, term to genes # param: mf_tg - the molecular function tree, term to genes # # returns: the joined tree, gene to terms def join_gt(hp_tg, bp_tg, mf_tg): hp_gt = swap_key_value(hp_tg) bp_gt = swap_key_value(bp_tg) mf_gt = swap_key_value(mf_tg) all_gt = {} for gene in hp_gt: if gene not in all_gt: all_gt[gene] = set() for term in hp_gt[gene]: all_gt[gene].add(term) for gene in bp_gt: if gene not in all_gt: all_gt[gene] = set() for term in bp_gt[gene]: all_gt[gene].add(term) for gene in mf_gt: if gene not in all_gt: all_gt[gene] = set() for term in mf_gt[gene]: all_gt[gene].add(term) return all_gt # Find all leaf nodes and add all of the genes to their parents. # # param: tree_child_parent - tree that is child to parent # param: gene_to_terms - gene to terms it is annotated to # returns: terms to genes tree with all ancestors having all the terms below theirs genes def gene_to_all_parents(tree_child_parent, gene_to_terms): tree_parent_child = swap_key_value(tree_child_parent) all_nodes = set() terms_to_genes = swap_key_value(gene_to_terms) leaf_nodes = set() to_check = set() for node in set(tree_parent_child.keys()): all_nodes.add(node) for node in set(tree_child_parent.keys()): all_nodes.add(node) for node in all_nodes: if node not in set(tree_parent_child.keys()): leaf_nodes.add(node) for node in leaf_nodes: for parent in tree_child_parent[node]: to_check.add(parent) while len(to_check) != 0: checking = to_check.pop() if checking in tree_child_parent: for parent in tree_child_parent[checking]: to_check.add(parent) terms_to_genes = gene_to_parent(checking, tree_parent_child, terms_to_genes) return terms_to_genes # Add all gene_carry genes to the current node. Add current nodes # genes to gene_carry and then call for all parents of current node # this function until there are no parents left to call. 
# # param: node - the current node in the tree # param: tree - a dictionary; key: term, value: set of parent terms # param: gene_to_terms - a dictionary; key: gene, value: set of terms that it is annotated to # return: new_gene_to_terms - updated gene_to_terms with the node that was called having all of its children's genes def gene_to_parent(node, tree, terms_to_genes): new_terms_to_genes = terms_to_genes for child in tree[node]: if node not in new_terms_to_genes: new_terms_to_genes[node] = set() if child in new_terms_to_genes: for c in new_terms_to_genes[child]: new_terms_to_genes[node].add(c) return new_terms_to_genes # Find root node and move down through the tree until all terms transitively know their parents. # # param: tree_child_parent - dictionary; key: term, value: set of parents # returns: dictionary; key: term, value: set of transitive parents def terms_to_all_parents(tree_child_parent): new_tree_child_parent = tree_child_parent tree_parent_child = swap_key_value(new_tree_child_parent) # Find the root root = '' for node in new_tree_child_parent: if len(new_tree_child_parent[node]) == 0 and node in tree_parent_child: root = node # Check each of the nodes starting at the root and moving down through the tree. to_check = [] for child in tree_parent_child[root]: to_check.append(child) while len(to_check) != 0: cur_check = to_check.pop() add_parents(cur_check, new_tree_child_parent) if cur_check in tree_parent_child: for child in tree_parent_child[cur_check]: to_check.append(child) return new_tree_child_parent # Joins to trees together into one tree. 
# # param: tree - dictionary; key: gene, value: set of terms (either bp or mf) # param: hp_tree - dictionary; key: gene, value: set of terms in hp # returns: dictionary; key: gene, value: set of terms def join_two(tree, hp_tree): combined_tree = {} for gene in tree: combined_tree[gene] = tree[gene] for gene in hp_tree: if gene in combined_tree: combined_tree[gene] = tree[gene].union(hp_tree[gene]) return combined_tree # Adds all parents transitively to the current node. # # param: node - the current node in the tree # param: tree - a dictionary; key: term, value: set of parent terms (is modified in this function) def add_parents(node, tree_child_parent): for parents in tree_child_parent[node]: tree_child_parent[node] = tree_child_parent[node].union(tree_child_parent[parents]) # Calculates information content of every term of the tree # # param: tree - the tree; key: term, value: genes annotated to that term (with transitive property) # return: term_ic - dictionary; key: term, value: information content of that term def calculate_ic(tree): term_ic = {} # key: term, value: ic distinct_genes = set() for term in tree: term_ic[term] = len(tree[term]) distinct_genes = distinct_genes.union(tree[term]) for term in term_ic: term_ic[term] /= len(distinct_genes) if term_ic[term] != 0: term_ic[term] = -log10(term_ic[term]) return term_ic
{"/main_runner.py": ["/main.py"], "/main.py": ["/ontology_parsing.py", "/annotation_parsing.py", "/tree_modification.py", "/apriori_algorithm.py", "/association_creation.py"]}
33,342
lilywise96/ChoResearch
refs/heads/master
/main_runner.py
"""Parameter sweep driver: calls general_main once per (tree, support, confidence) combination."""
from main import general_main

# general_main(freq_file_ext, association_file_ext, recreate_onto_ann, recreate_freq_itemsets,
#              tree, min_support, min_weighted_support, min_confidence,
#              min_information_content, min_coverage)

# Parameter grid for the sweep.
trees = ['all', 'bp', 'mf', 'hp']
support = [0.02, 0.015]
weighted_support = 0.1
confidence = [0.03, 0.02, 0.01]
info_content = .3
coverage = 0.1

# Sequential file-name suffixes so each run writes to its own output files.
count_freq_file = 1
count_assoc_file = 1
first = True

for tree_name in trees:
    for sup_value in support:
        for conf_value in confidence:
            # The ontology/annotation caches are rebuilt only on the very first run;
            # every later run reuses them ("false") but still rebuilds itemsets.
            recreate_onto = "true" if first else "false"
            first = False
            general_main(count_freq_file, count_assoc_file, recreate_onto, "true",
                         tree_name, sup_value, weighted_support, conf_value,
                         info_content, coverage)
            count_assoc_file += 1
            count_freq_file += 1
{"/main_runner.py": ["/main.py"], "/main.py": ["/ontology_parsing.py", "/annotation_parsing.py", "/tree_modification.py", "/apriori_algorithm.py", "/association_creation.py"]}
33,343
lilywise96/ChoResearch
refs/heads/master
/main.py
""" Filename: main.py Author: Lily Wise Calls other functions to find associations between genes and terms for diseases. Should calculate associations with support 4% - 10%, coverage 4% - 10%, and confidence 20% - 50%. """ # BP -> BP and MF -> MF and HPO -> HPO and BP -> HPO and MF -> HPO # Accuracy Measuring? from ontology_parsing import hpo_parsing_onto, parsing_go, testing_ontology_parsing from annotation_parsing import hpo_parsing_ann, parsing_ann, testing_annotation_parsing from tree_modification import gene_to_all_parents, join_gt, calculate_ic, terms_to_all_parents, \ all_specificity, swap_key_value, join_two from apriori_algorithm import apriori from association_creation import create_associations from math import ceil import sys # Directories input_direct = "./input/" created_direct = "./created/" # File names # Specificity Storage hp_spec_filename = created_direct + "hp_spec.txt" bp_spec_filename = created_direct + "bp_spec.txt" mf_spec_filename = created_direct + "mf_spec.txt" # Information Content Storage hp_ic_filename = created_direct + "hp_ic.txt" bp_ic_filename = created_direct + "bp_ic.txt" mf_ic_filename = created_direct + "mf_ic.txt" # Transitivity of Terms Storage hp_trans_filename = created_direct + "hp_trans.txt" bp_trans_filename = created_direct + "bp_trans.txt" mf_trans_filename = created_direct + "mf_trans.txt" # Gene to Terms All gene_term_filename = created_direct + "gene_term.txt" gene_term_bp_filename = created_direct + "gene_term_bp.txt" gene_term_mf_filename = created_direct + "gene_term_mf.txt" gene_term_hp_filename = created_direct + "gene_term_hp.txt" # Ontology Given hp_ontology_filename = input_direct + "hp.obo.txt" g_ontology_filename = input_direct + "go.obo" # Annotation Given hp_annotations_filename = input_direct + "hpo_genes_to_phenotype.txt" g_annotations_filename = input_direct + "goa_human.gaf" # Creates ontology and writes it to an output file, as well as calculates information content. 
# return: all_gt - dictionary; key: gene, value: set of terms
#         (plus merged specificity/IC maps and the per-ontology gene->terms maps)


def _write_gene_terms(filename, gene_terms):
    # Serializer: one gene per line — the gene, then each of its terms, tab-separated.
    with open(filename, "w") as output_file:
        for gene in gene_terms:
            output_file.write(gene)
            for term in gene_terms[gene]:
                output_file.write("\t")
                output_file.write(term)
            output_file.write("\n")


def _write_scores(filename, scores):
    # Serializer: one "term<TAB>score" pair per line.
    with open(filename, "w") as output_file:
        for term in scores:
            output_file.write(term + "\t" + str(scores[term]) + "\n")


def _read_gene_terms(filename):
    # Inverse of _write_gene_terms: first column is the gene, remaining columns are terms.
    gene_terms = {}
    with open(filename, "r") as file:
        for line in file:
            cols = line.split("\t")
            cols[len(cols) - 1] = cols[len(cols) - 1][0:-1]  # strip trailing newline
            gene_terms[cols[0]] = set()
            for i in range(1, len(cols)):
                gene_terms[cols[0]].add(cols[i])
    return gene_terms


def _read_scores(filename):
    # Inverse of _write_scores: "term<TAB>float-score" per line.
    scores = {}
    with open(filename, "r") as file:
        for line in file:
            cols = line.split("\t")
            cols[len(cols) - 1] = cols[len(cols) - 1][0:-1]  # strip trailing newline
            scores[cols[0]] = float(cols[1])
    return scores


def create_onto_ann():
    """Parse the ontology and annotation inputs, derive the transitive trees,
    information content and specificity, persist everything under ./created/,
    and return it.

    return: (all_gt, all_spec, all_ic, bp_gt, mf_gt, hp_gt) where each *_gt maps
            gene -> set of terms and all_spec / all_ic map term -> float score.
    """
    # Read in ontologies. terms to parents
    hpo_terms_parents = hpo_parsing_onto(hp_ontology_filename)
    bp_terms_parents, mf_terms_parents, cc_terms_parents = parsing_go(g_ontology_filename)

    # Read in annotations.
    hp_gt = hpo_parsing_ann(hp_annotations_filename)
    gene_syn, bp_gt, mf_gt, cc_gt = parsing_ann(g_annotations_filename)

    # Creates transitive trees (each term knows ALL of its ancestors).
    hpo_terms_parents_trans = terms_to_all_parents(hpo_terms_parents)
    bp_terms_parents_trans = terms_to_all_parents(bp_terms_parents)
    mf_terms_parents_trans = terms_to_all_parents(mf_terms_parents)

    # Recursively add genes to parents (term -> genes, propagated upward).
    hp_tg = gene_to_all_parents(hpo_terms_parents, hp_gt)
    bp_tg = gene_to_all_parents(bp_terms_parents, bp_gt)
    mf_tg = gene_to_all_parents(mf_terms_parents, mf_gt)

    # Calculate Information Content per term.
    hp_ic = calculate_ic(hp_tg)
    bp_ic = calculate_ic(bp_tg)
    mf_ic = calculate_ic(mf_tg)

    # Specificity of all nodes.
    hp_spec = all_specificity(hpo_terms_parents_trans, hp_ic)
    bp_spec = all_specificity(bp_terms_parents_trans, bp_ic)
    mf_spec = all_specificity(mf_terms_parents_trans, mf_ic)

    # Join all term-to-gene maps and flip them to gene-to-term.
    all_gt = join_gt(hp_tg, bp_tg, mf_tg)
    hp_gt = swap_key_value(hp_tg)
    bp_gt = swap_key_value(bp_tg)
    mf_gt = swap_key_value(mf_tg)

    # Persist the gene -> terms maps.
    _write_gene_terms(gene_term_filename, all_gt)
    _write_gene_terms(gene_term_bp_filename, bp_gt)
    _write_gene_terms(gene_term_mf_filename, mf_gt)
    _write_gene_terms(gene_term_hp_filename, hp_gt)

    # Write specificity to files.
    _write_scores(hp_spec_filename, hp_spec)
    _write_scores(bp_spec_filename, bp_spec)
    _write_scores(mf_spec_filename, mf_spec)

    # Write information content to files.
    _write_scores(hp_ic_filename, hp_ic)
    _write_scores(bp_ic_filename, bp_ic)
    _write_scores(mf_ic_filename, mf_ic)

    # Merge the per-ontology score maps into single lookup tables.
    all_ic = {**hp_ic, **bp_ic, **mf_ic}
    all_spec = {**hp_spec, **bp_spec, **mf_spec}

    return all_gt, all_spec, all_ic, bp_gt, mf_gt, hp_gt


def read_onto_ann():
    """Reload everything create_onto_ann previously wrote to ./created/.

    return: same tuple shape as create_onto_ann:
            (gt, all_spec, all_ic, bp_gt, mf_gt, hp_gt)
    """
    gt = _read_gene_terms(gene_term_filename)
    bp_gt = _read_gene_terms(gene_term_bp_filename)
    mf_gt = _read_gene_terms(gene_term_mf_filename)
    hp_gt = _read_gene_terms(gene_term_hp_filename)

    # Read Specificity Files
    hp_spec = _read_scores(hp_spec_filename)
    bp_spec = _read_scores(bp_spec_filename)
    mf_spec = _read_scores(mf_spec_filename)

    # Read Information Content Files
    hp_ic = _read_scores(hp_ic_filename)
    bp_ic = _read_scores(bp_ic_filename)
    mf_ic = _read_scores(mf_ic_filename)

    all_ic = {**hp_ic, **bp_ic, **mf_ic}
    all_spec = {**hp_spec, **bp_spec, **mf_spec}

    return gt, all_spec, all_ic, bp_gt, mf_gt, hp_gt


# Create frequent itemsets.
def create_freq_itemsets(filename, possible_left, all_gt, min_support, min_weighted_support,
                         min_information_content, all_spec, all_ic):
    """Run apriori over the gene->terms map, keep only itemsets that contain at
    least one allowed left-hand term, write them (with the thresholds used) to
    `filename`, and return them as a flat list.

    param: filename - output path for the frequent-itemset report
    param: possible_left - set of terms allowed on the left side of an association
    param: all_gt - dictionary; key: gene, value: set of terms
    param: min_support / min_weighted_support / min_information_content - apriori thresholds
    param: all_spec / all_ic - term -> specificity / information-content lookups
    return: list of frequent itemsets (tuples of terms)
    """
    all_terms = set()
    for terms in all_gt.values():
        all_terms.update(terms)

    freq_table = apriori(all_gt, all_terms, min_support, min_weighted_support,
                         min_information_content, all_spec, all_ic)

    # Flatten the size -> itemsets table into one list.
    all_itemsets = []
    for size_freq in freq_table:
        all_itemsets.extend(freq_table[size_freq])

    # Keep only itemsets containing at least one allowed left-hand term.
    # BUG FIX: the original called freq_itemsets.remove(itemset) while iterating
    # the same list, which silently skips the element following every removal;
    # build a filtered list instead. (Size-0/1 entries — the empty frozenset and
    # bare term strings — fail the check and are dropped, as before.)
    freq_itemsets = [itemset for itemset in all_itemsets
                     if any(item in possible_left for item in itemset)]

    with open(filename, "w") as file:
        file.write("Min Support - " + str(min_support) + "\n")
        file.write("Min Information Content - " + str(min_information_content) + "\n")
        file.write("Min Weighted Support - " + str(min_weighted_support) + "\n")
        for itemset in freq_itemsets:
            file.write("\t".join(itemset))
            file.write("\n")

    return freq_itemsets


# Read frequent itemsets.
def read_freq_itemsets(filename):
    """Read back a frequent-itemset file written by create_freq_itemsets.

    param: filename - the frequent-itemset report to read
    return: list of itemsets, each a set of term strings
    """
    freq_itemsets = []
    file = open(filename, "r")
    # The first three lines are the threshold header written by
    # create_freq_itemsets; skip them.
    count = 3
    for line in file:
        if count > 0:
            count -= 1
        else:
            items = line.split("\t")
            items[len(items) - 1] = items[len(items) - 1][0:-1]  # strip trailing newline
            itemset = set()
            for item in items:
                itemset.add(item)
            freq_itemsets.append(itemset)
    file.close()
    return freq_itemsets


def create_new_associations(left_terms, right_terms, all_gt, freq_itemsets, min_confidence, min_coverage, filename, all_spec):
    """Create the final associations and write them (with the thresholds used) to `filename`.

    param: left_terms / right_terms - terms allowed on each side of an association
    param: all_gt - dictionary; key: gene, value: set of terms
    param: freq_itemsets - frequent itemsets produced by apriori
    param: min_confidence / min_coverage - acceptance thresholds
    param: filename - output path for the association report
    param: all_spec - term -> specificity lookup
    return: the list of accepted associations
    """
    final_associations = create_associations(left_terms, right_terms, all_gt, freq_itemsets, min_confidence, min_coverage, all_spec)
    file = open(filename, "w")
    file.write("Min Coverage - " + str(min_coverage) + "\n")
    file.write("Min Confidence - " + str(min_confidence) + "\n")
    # One association per line, members tab-separated.
    for association in final_associations:
        for associate in range(0, len(association)):
            if associate != 0:
                file.write("\t")
            file.write(association[associate])
        file.write("\n")
    file.close()
    return final_associations


def general_main(freq_file_ext, association_file_ext, recreate_onto_ann, recreate_freq_itemsets, tree,
                 min_support, min_weighted_support, min_confidence, min_information_content, min_coverage):
    """Orchestrate one full run: load (or rebuild) the ontology/annotation data,
    mine frequent itemsets for the requested tree, and write the associations.

    param: freq_file_ext / association_file_ext - numeric suffixes for the output files
    param: recreate_onto_ann - "true" to re-parse the raw inputs, anything else reloads caches
    param: recreate_freq_itemsets - "true" to re-run apriori, anything else reloads the file
    param: tree - which ontology drives the left side: 'bp', 'mf', 'hp', or anything else for all
    param: min_support, min_weighted_support, min_confidence,
           min_information_content, min_coverage - mining thresholds
    """
    freq_itemsets_filename = created_direct + "freq_itemsets_" + str(freq_file_ext) + ".txt"
    associations_filename = created_direct + "associations_" + str(association_file_ext) + ".txt"
    information_filename = "info_on_files.txt"
    # Append this run's parameters to the shared log file.
    info_file = open(information_filename, "a+")
    print("Frequent itemsets filename: "+str(freq_itemsets_filename))
    print("Associations filename: "+str(associations_filename))
    print("Tree: "+str(tree))
    print("Minimum support: "+str(min_support))
    print("Minimum weighted support: "+str(min_weighted_support))
    print("Minimum confidence: "+str(min_confidence))
    print("Minimum information content: "+str(min_information_content))
    print("Minimum coverage: "+str(min_coverage))
    info_file.write("Frequent itemsets filename: " + str(freq_itemsets_filename) + "\n")
    info_file.write("Associations filename: " + str(associations_filename) + "\n")
    info_file.write("Tree: " + str(tree) + "\n")
    info_file.write("Minimum support: " + str(min_support) + "\n")
    info_file.write("Minimum weighted support: " + str(min_weighted_support) + "\n")
    info_file.write("Minimum confidence: " + str(min_confidence) + "\n")
    info_file.write("Minimum information content: " + str(min_information_content) + "\n")
    info_file.write("Minimum coverage: " + str(min_coverage) + "\n")
    info_file.write("\n\n")
    info_file.close()

    # Either rebuild the ontology/annotation caches from the raw inputs or reload them.
    if recreate_onto_ann == "true":
        all_gt, all_spec, all_ic, bp_gt, mf_gt, hp_gt = create_onto_ann()
    else:
        all_gt, all_spec, all_ic, bp_gt, mf_gt, hp_gt = read_onto_ann()

    # Collect the terms allowed on each side of an association for the chosen tree.
    # Left terms come from the chosen ontology; the right side is HP (or the same
    # set for 'hp'/'all').
    possible_left = set()
    possible_right = set()
    if tree == 'bp':
        for gene in bp_gt:
            possible_left = bp_gt[gene].union(possible_left)
        for gene in hp_gt:
            possible_right = hp_gt[gene].union(possible_right)
    elif tree == 'mf':
        for gene in mf_gt:
            possible_left = mf_gt[gene].union(possible_left)
        for gene in hp_gt:
            possible_right = hp_gt[gene].union(possible_right)
    elif tree == 'hp':
        for gene in hp_gt:
            possible_left = hp_gt[gene].union(possible_left)
        possible_right = possible_left
    else:
        for gene in all_gt:
            possible_left = all_gt[gene].union(possible_left)
        possible_right = possible_left

    if recreate_freq_itemsets == "true":
        # Restrict the transaction set to the chosen tree before mining.
        if tree == 'bp':
            all_gt = join_two(bp_gt, hp_gt)
        elif tree == 'mf':
            all_gt = join_two(mf_gt, hp_gt)
        elif tree == 'hp':
            all_gt = hp_gt
        freq_itemsets = create_freq_itemsets(freq_itemsets_filename, possible_left, all_gt, min_support,
                                             min_weighted_support, min_information_content, all_spec, all_ic)
    else:
        freq_itemsets = read_freq_itemsets(freq_itemsets_filename)

    create_new_associations(possible_left, possible_right, all_gt, freq_itemsets, min_confidence, min_coverage,
                            associations_filename, all_spec)
    print("Done")
{"/main_runner.py": ["/main.py"], "/main.py": ["/ontology_parsing.py", "/annotation_parsing.py", "/tree_modification.py", "/apriori_algorithm.py", "/association_creation.py"]}
33,344
lilywise96/ChoResearch
refs/heads/master
/annotation_parsing.py
""" File: annotation_parsing.py Author: Lily Wise This file parses annotation files for hpo and for go. """ # This function parses the hpo annotation file. It pulls the gene annotation # and terms it is associated to. # # param: filename - the hpo annotation file # return: gene_term_id - a dictionary; key: gene, value: array of terms def hpo_parsing_ann(filename): file = open(filename, "r") gene_id_symbol = {} gene_term_id = {} for line in file: if not line.startswith('#'): columns = line.split('\t') gene_id = columns[0] gene_symbol = columns[1] term_id = columns[3][0:10] if gene_id not in gene_id_symbol.keys(): gene_id_symbol[gene_id] = gene_symbol gene_term_id[gene_symbol] = [] gene_term_id[gene_symbol].append(term_id) return gene_term_id # This function parses the gene annotation file. It pulls the gene annotation # and terms it is associated to. # # param: filename - the gene annotation file # return: gene_syn - a dictionary; key: a gene, value: array of synonyms # return: bp_gene_terms - a biological process dictionary; key: a gene, # value: array of terms that the gene is annotated to # return: mf_gene_terms - a molecular function dictionary; key: a gene, # value: array of terms that the gene is annotated to # return: cc_gene_terms - a cellular component dictionary; key: a gene, # value: array of terms that the gene is annotated to def parsing_ann(filename): file = open(filename, "r") gene_syn = {} bp_gene_terms = {} mf_gene_terms = {} cc_gene_terms = {} for line in file: if not line.startswith('!'): cols = line.split('\t') if 'NOT' in cols[3]: gene = cols[2] term = cols[4] namespace = cols[8] synonym_col = cols[10] if 'P' in namespace: if gene not in bp_gene_terms.keys(): bp_gene_terms[gene] = set() bp_gene_terms[gene].add(term) elif 'F' in namespace: if gene not in mf_gene_terms.keys(): mf_gene_terms[gene] = set() mf_gene_terms[gene].add(term) else: if gene not in cc_gene_terms.keys(): cc_gene_terms[gene] = set() cc_gene_terms[gene].add(term) if gene not in 
gene_syn.keys(): gene_syn[gene] = set(gene) synonyms = synonym_col.split('|') for syn in synonyms: if syn not in gene_syn[gene]: gene_syn[gene].add(syn) return gene_syn, bp_gene_terms, mf_gene_terms, cc_gene_terms # This function is for testing the annotation with a modified input file. # # param: filename - the file to read in from # return: genes_terms - dictionary; key: gene, value: terms that the gene is annotated to def testing_annotation_parsing(filename): file = open(filename, "r") genes_terms = {} for line in file: cols = line.split(" ") cols[len(cols) - 1] = cols[len(cols) - 1][0:-1] if cols[0]: genes_terms[cols[0]] = set() for i in range(1, len(cols)): genes_terms[cols[0]].add(cols[i]) return genes_terms
{"/main_runner.py": ["/main.py"], "/main.py": ["/ontology_parsing.py", "/annotation_parsing.py", "/tree_modification.py", "/apriori_algorithm.py", "/association_creation.py"]}
33,345
lilywise96/ChoResearch
refs/heads/master
/association_creation.py
""" Filename: association_creation.py Author: Lily Wise This creates association and has other functions that are used to calculate the associations. """ # This function calculates the coverage given an association and all the itemsets. # # param: left_val - the left value of an association # param: all_itemsets - all of the itemsets given # return: the number of times the left_val appears in all the itemsets divided by the total number of itemsets def coverage(left_val, all_itemsets, all_spec): count = 0 # Loop through all the itemsets and check if the left_val is in the itemset for itemset in all_itemsets: if left_val in all_itemsets[itemset]: count += 1 cover = 0 if left_val in all_spec: cover = count * all_spec[left_val] * 10 return cover # This functions calculates the confidence of a given association. # # param: all_gt - all of the itemsets # param: association - the current association to calculate confidence for # # returns: the confidence count as a decimal def confidence(all_gt, association, all_spec): confidence_count = 0 # Calculate confidence of an association by iterating over the frequent itemsets # Loop through the transactions for trans in all_gt: found_big = True # For checking all items in the itemset are present # Loop through the items in the itemset for associate in association: found = False # For checking just the current item in the itemset is present # Loop through each yeast in the transaction for i in all_gt[trans]: if i == associate: found = True if not found: found_big = False if found_big: confidence_count += 1 conf = 0 if 1 in association and association[1] in all_spec: conf = confidence_count * all_spec[association[1]] * 100 return conf # This function takes the frequent itemsets that were created by the apriori algorithm and creates associations. An # association is kept if it meets the minimum confidence requirements and the left side of the association meets the # minimum coverage requirements. 
# # param: all_gt - all the itemsets originally read in # param: freq_itemsets - the frequent itemsets created by the apriori algorithm # param: min_confidence - the minimum confidence, as a decimal # param: min_coverage - the minimum coverage, as a decimal # # returns: the list of final associations that meets the requirements def create_associations(left_terms, right_terms, all_gt, freq_itemsets, min_confidence, min_coverage, all_spec): final_associations = [] associations = all_associations(freq_itemsets) for associate in associations: cur_confidence = confidence(all_gt, associate, all_spec) cur_coverage = coverage(associate[0], all_gt, all_spec) if cur_confidence >= min_confidence and cur_coverage >= min_coverage \ and associate[0] in left_terms and associate[1] in right_terms: final_associations.append(associate) return final_associations # Creates all possible associations with the frequent itemsets. # # param: freq_itemsets - the list of frequent itemsets created by the apriori algorithm # returns: the associations created. def all_associations(freq_itemsets): associations = [] for itemset in freq_itemsets: association = [] for item in itemset: association.append(item) associations.append(association) associations.append(association[::-1]) return associations
{"/main_runner.py": ["/main.py"], "/main.py": ["/ontology_parsing.py", "/annotation_parsing.py", "/tree_modification.py", "/apriori_algorithm.py", "/association_creation.py"]}
33,346
lilywise96/ChoResearch
refs/heads/master
/apriori_algorithm.py
""" Filename: apriori_algorithm.py Author: Lily Wise Calculates the frequent itemsets. """ from math import ceil, log10 from itertools import combinations, permutations # This function calculates support of the itemset from transactions # param transactions: All transactions in a dictionary # param itemset: The itemset to calculate support # return: The support count of the itemset def weighted_support(transactions, itemset, all_spec): support_count = 0 # Calculate support of an itemset by iterating over the frequent itemsets # Loop through the transactions for trans in transactions: found_big = True # For checking all items in the itemset are present # Loop through the items in the itemset for i in itemset: found = False # For checking just the current item in the itemset is present # Loop through each yeast in the transaction for t in transactions[trans]: if i == t: found = True if not found: found_big = False if found_big: support_count += 1 support_weight = 2 * all_spec[itemset[0]] * all_spec[itemset[1]] * support_count if (all_spec[itemset[0]] + all_spec[itemset[1]]) != 0: support_weight /= (all_spec[itemset[0]] + all_spec[itemset[1]]) else: support_weight = 0 return support_weight # This function calculates support of the itemset from transactions # param transactions: All transactions in a dictionary # param itemset: The itemset to calculate support # return: The support count of the itemset def support(transactions, itemset): support_count = 0 # Calculate support of an itemset by iterating over the frequent itemsets # Loop through the transactions for trans in transactions: found_big = True # For checking all items in the itemset are present # Loop through the items in the itemset for i in itemset: found = False # For checking just the current item in the itemset is present # Loop through each yeast in the transaction for t in transactions[trans]: if i == t: found = True if not found: found_big = False if found_big: support_count += 1 return support_count # 
This function generates a combination from the frequent itemsets of size (itemset_size - 1) and accepts joined # itemsets if they share (itemset_size - 2) items # param frequent_itemsets: The table of frequent itemsets discovered # param itemset_size: The size of joined itemsets # return: All valid joined itemsets def generate_selectively_joined_itemsets(frequent_itemsets, itemset_size): # Record seen_itemsets to prevent duplicates seen_itemsets = set() joined_itemsets = set() # Try all combinations of two itemsets from the table of frequent itemsets and join the pair if they share # (itemset_size - 2) items # Add each joined itemset to the list if it is not present in the list and discard it otherwise for item1 in frequent_itemsets[itemset_size-1]: for item2 in frequent_itemsets[itemset_size-1]: # if the item set is size 1, then you don't need to look for the intersection if itemset_size-1 == 1: temp_tuple = (item1, item2) temp_tuple = tuple(sorted(temp_tuple)) if item1 is not item2 and temp_tuple not in seen_itemsets: joined_itemsets.add(temp_tuple) seen_itemsets.add(temp_tuple) # if the item set is greater than 1, then you need to find the intersection else: list_a = set(item1) list_b = set(item2) # Get the intersection and the union intersection = list_a.intersection(list_b) union = list_a.union(list_b) length_intersection = len(intersection) # Check if the sets have enough in common if length_intersection >= itemset_size-2 and length_intersection is not itemset_size-1: union = sorted(union) temp_tuple = tuple(union) if temp_tuple not in seen_itemsets: seen_itemsets.add(temp_tuple) joined_itemsets.add(temp_tuple) joined_itemsets = sorted(joined_itemsets) return joined_itemsets # This function checks all the subsets of selected itemsets whether they all # are frequent or not and prunes the itemset if anyone of the subsets is not frequent # param selected_itemsets: The itemsets which are needed to be checked # param frequent_itemsets: The table of frequent 
# itemsets discovered
# param itemset_size: The size of intended frequent itemsets
# return: The itemsets whose all subsets are frequent
def apply_apriori_pruning(selected_itemsets, frequent_itemsets, itemset_size):
    apriori_pruned_itemsets = set()
    # Add each itemset to the list if all of its subsets are frequent and discard it otherwise
    if itemset_size > 3:
        for item in selected_itemsets:
            # all() short-circuits on the first infrequent subset (the original
            # kept scanning every subset even after a miss).
            if all(sub in frequent_itemsets[itemset_size - 2]
                   for sub in combinations(item, itemset_size - 2)):
                apriori_pruned_itemsets.add(item)
    else:
        # Keep every itemset if size <= 3 because it was already formed from a
        # pruned list, so it can't be pruned further.
        apriori_pruned_itemsets.update(selected_itemsets)
    return sorted(apriori_pruned_itemsets)


# This function generates candidate itemsets of size (itemset_size) by selective joining and apriori pruning
# param frequent_itemsets: The table of frequent itemsets discovered
# param itemset_size: The size of intended frequent itemsets
# return: candidate itemsets formed by selective joining and apriori pruning
def generate_candidate_itemsets(frequent_itemsets, itemset_size):
    joined_itemsets = generate_selectively_joined_itemsets(frequent_itemsets, itemset_size)
    return apply_apriori_pruning(joined_itemsets, frequent_itemsets, itemset_size)


# This function generates a table of itemsets with all frequent items from transactions based on a given minimum support
# param transactions: The transactions based upon which support is calculated
# param items: The unique set of items present in the transaction
# param min_support: The minimum support to find frequent itemsets
# return: The table of all frequent itemsets of different sizes
def generate_all_frequent_itemsets(transactions, items, min_support, min_weighted_support, min_information_content, all_spec, all_ic):
    # Scale the relative thresholds to absolute values.
    min_support = ceil(min_support * len(transactions))
    # NOTE(review): this divides instead of scaling like min_support above; it looks
    # like a typo for ceil(min_weighted_support * len(transactions)) -- confirm before changing.
    min_weighted_support = min_weighted_support / ceil(min_weighted_support * len(transactions))
    min_information_content = min_information_content * -log10(1 / len(items))
    frequent_itemsets = dict()
    # Size-0 level: the empty itemset keeps the table's indexing uniform.
    itemset_size = 0
    frequent_itemsets[itemset_size] = list()
    frequent_itemsets[itemset_size].append(frozenset())
    # Frequent itemsets of size 1.
    itemset_size += 1
    frequent_itemsets[itemset_size] = list()
    # Find all frequent itemsets of size-1 and add them to the list.
    # (The original printed one progress line per item, flooding stdout on large
    # vocabularies; those debug prints were removed.)
    for i in items:
        if support(transactions, [i]) >= min_support and all_ic[i] >= min_information_content:
            frequent_itemsets[itemset_size].append(i)
    frequent_itemsets[itemset_size] = sorted(frequent_itemsets[itemset_size])
    print("Finished itemsize " + str(itemset_size))
    # Frequent itemsets of greater size: grow levels until one comes up empty.
    itemset_size += 1
    while frequent_itemsets[itemset_size - 1]:
        frequent_itemsets[itemset_size] = list()
        candidate_itemsets = generate_candidate_itemsets(frequent_itemsets, itemset_size)
        pruned_itemset = set()
        # Prune the candidate itemset if its weighted support is below the minimum.
        for candidate in candidate_itemsets:
            if weighted_support(transactions, candidate, all_spec) >= min_weighted_support:
                pruned_itemset.add(candidate)
        frequent_itemsets[itemset_size] = pruned_itemset
        print("Finished itemsize " + str(itemset_size))
        itemset_size += 1
    return frequent_itemsets
# Calls other methods. The main apriori algorithm.
#
# param: gene_terms - dictionary; key: gene, value: set of terms
# param: gene_set - the set of all distinct genes
# param: min_support - the minimum support
# return: the table of frequent itemsets, keyed by itemset size
def apriori(gene_terms, gene_set, min_support, min_weighted_support, min_information_content, all_spec, all_ic):
    """Entry point for the apriori run: delegate straight to the table builder."""
    return generate_all_frequent_itemsets(gene_terms, gene_set, min_support, min_weighted_support,
                                          min_information_content, all_spec, all_ic)
{"/main_runner.py": ["/main.py"], "/main.py": ["/ontology_parsing.py", "/annotation_parsing.py", "/tree_modification.py", "/apriori_algorithm.py", "/association_creation.py"]}
33,347
lilywise96/ChoResearch
refs/heads/master
/ontology_parsing.py
""" File: ontology_parsing.py Author: Lily Wise This file parses ontologies for hpo files and for gene ontology files. """ # This function parses the hpo ontology file. It pulls the terms and # their parents to generate a tree. # # param: filename - the file that holds the hpo ontology # return: terms_parents - a dictionary; key: id, value: array of terms (parents) def hpo_parsing_onto(filename): file = open(filename, "r") terms_parents = {} cur_key = '' # Start reading in file. for line in file: # Find a new term. if '[Term]' in line: cur_key = '' # Find parents. elif line.startswith('is_a:'): cur_parent = line[6:16] if cur_parent not in terms_parents[cur_key]: terms_parents[cur_parent] = set() terms_parents[cur_key].add(cur_parent) # Reads in the id number. elif line.startswith('id:'): cur_key = line[4:14] if cur_key not in terms_parents.keys(): terms_parents[cur_key] = set() return terms_parents # This function parse the gene ontology file. It pulls the terms and their # parents. If is_obsolete is found then the term is not included. # # param: filename - the file that holds the gene ontology # return: terms_parents - a dictionary; key: id, value: array of terms (parents) def parsing_go(filename): file = open(filename, "r") bp_terms_parents = {} mf_terms_parents = {} cc_terms_parents = {} cur_parents = set() cur_key = '' is_obsolete = False namespace = '' # Start reading in file. for line in file: # Identifies that a new term is starting. if 'Term' in line: if not is_obsolete: if namespace is 'b': bp_terms_parents[cur_key] = cur_parents elif namespace is 'm': mf_terms_parents[cur_key] = cur_parents elif namespace is 'c': cc_terms_parents[cur_key] = cur_parents cur_parents = set() cur_key = '' is_obsolete = False # Reads in the id. elif line.startswith('id:'): cur_key = line[4:14] # Removes the id if the is_obsolete is found. 
elif 'is_obsolete' in line: is_obsolete = True if cur_key is not '': if namespace is 'b' and cur_key in bp_terms_parents: bp_terms_parents.pop(cur_key) elif namespace is 'm' and cur_key in mf_terms_parents: mf_terms_parents.pop(cur_key) elif namespace is 'c' and cur_key in cc_terms_parents: cc_terms_parents.pop(cur_key) # If it isn't obsolete then the parents can be added if found. elif line.startswith('is_a:') and not is_obsolete: cur_parents.add(line[6:16]) # Checks which namespace it is in. elif line.startswith('namespace:'): namespace = line[11] if namespace is 'b' and cur_key not in bp_terms_parents: bp_terms_parents[cur_key] = set() elif namespace is 'm' and cur_key not in mf_terms_parents: mf_terms_parents[cur_key] = set() elif namespace is 'c' and cur_key not in cc_terms_parents: cc_terms_parents[cur_key] = set() return bp_terms_parents, mf_terms_parents, cc_terms_parents # Testing of ontology parsing with modified file. # # param: filename - the file to parse the ontology from # return: terms_parents - dictionary; key: term, value: set of parents def testing_ontology_parsing(filename): file = open(filename, "r") terms_parents = {} for line in file: cols = line.split(" ") cols[len(cols) - 1] = cols[len(cols) - 1][0:-1] if cols[0]: terms_parents[cols[0]] = set() for i in range(1, len(cols)): terms_parents[cols[0]].add(cols[i]) return terms_parents
{"/main_runner.py": ["/main.py"], "/main.py": ["/ontology_parsing.py", "/annotation_parsing.py", "/tree_modification.py", "/apriori_algorithm.py", "/association_creation.py"]}
33,366
NGT-Dimka/Films
refs/heads/master
/users/views.py
from django.shortcuts import render_to_response, render, redirect
from django.http.response import HttpResponseNotAllowed
from .models import User
from django.views.generic import TemplateView
from .forms import UserForm

# Create your views here.


class NewUserView(TemplateView):
    """Read-only profile page for a user."""
    model = User
    template_name = 'films/user_profile_detail.html'


def profile_detail(request):
    """Render the profile-detail template without any context."""
    return render_to_response('films/user_profile_detail.html')


def registration(request):
    """Handle GET (blank form) and POST (create user) for registration.

    Any other HTTP method gets a 405 response.
    """
    if request.method not in ["POST", "GET"]:
        return HttpResponseNotAllowed(permitted_methods=["POST", "GET"])
    if request.method == "POST":
        user_form = UserForm(request.POST)
        if user_form.is_valid():
            # BUGFIX: ModelForm.save() already persists the instance; the original
            # called user.save() again, issuing a redundant second database write.
            user_form.save()
            return redirect('index')
        # Invalid POST: re-render with the bound form so errors are shown.
        return render(request, 'films/registration.html', {'user_form': user_form})
    else:
        user_form = UserForm()
        return render(request, 'films/registration.html', {'user_form': user_form})
{"/users/views.py": ["/users/models.py"], "/films/urls.py": ["/films/views.py"], "/users/models.py": ["/users/managers.py"], "/films/admin.py": ["/films/models.py"], "/users/urls.py": ["/users/views.py"], "/films/views.py": ["/films/models.py", "/films/forms.py"], "/personal/urls.py": ["/personal/views.py"], "/personal/views.py": ["/films/models.py"], "/films/models.py": ["/users/models.py"], "/films/forms.py": ["/films/models.py"]}
33,367
NGT-Dimka/Films
refs/heads/master
/films/urls.py
from django.conf.urls import url

from films.views import FilmsListView, FilmDetailView, post_film_comment

# Routes for the films app: list page, detail page by pk, and comment posting.
urlpatterns = [
    url(r'^$', FilmsListView.as_view(), name='list'),
    url(r'^(?P<pk>\d+)/$', FilmDetailView.as_view(), name='detail'),
    url(r'^post_comment/$', post_film_comment, name='post-comment'),
]
{"/users/views.py": ["/users/models.py"], "/films/urls.py": ["/films/views.py"], "/users/models.py": ["/users/managers.py"], "/films/admin.py": ["/films/models.py"], "/users/urls.py": ["/users/views.py"], "/films/views.py": ["/films/models.py", "/films/forms.py"], "/personal/urls.py": ["/personal/views.py"], "/personal/views.py": ["/films/models.py"], "/films/models.py": ["/users/models.py"], "/films/forms.py": ["/films/models.py"]}
33,368
NGT-Dimka/Films
refs/heads/master
/users/models.py
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.db import models
from .managers import UserManager
from django.utils.translation import gettext as _

# Create your models here.


class User(AbstractBaseUser, PermissionsMixin):
    # Custom user model keyed on a unique username, with optional profile fields.
    username = models.CharField(_('username'), max_length=200, unique=True, db_index=True)
    first_name = models.CharField(_('first_name'), max_length=150, blank=True, null=True)
    last_name = models.CharField(_('last_name'), max_length=150, blank=True, null=True)
    password = models.CharField(_('password'), max_length=200)
    email = models.EmailField(_('email'), max_length=200, blank=True, null=True)
    location = models.CharField(max_length=100, blank=True, verbose_name='Населенный пункт:')
    birth_date = models.DateField(blank=True, verbose_name='Дата рождения:', null=True)
    avatar = models.ImageField(blank=True, verbose_name='Аватар:', null=True)
    is_active = models.BooleanField(_('is_active'), blank=True)

    objects = UserManager()

    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = []

    class Meta:
        verbose_name = 'User'
        verbose_name_plural = 'Users'

    def get_full_name(self):
        # "first last", stripped in case either part renders empty.
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def get_short_name(self):
        # The username doubles as the short display name.
        return self.username

    @staticmethod
    def create_profile(instance, created):
        # NOTE(review): User has no `user` field, so User.objects.create(user=...)
        # would raise TypeError; this looks intended for a separate Profile model
        # hooked to a post_save signal -- confirm.
        if created:
            User.objects.create(user=instance)

    @staticmethod
    def save_profile(instance):
        # NOTE(review): `instance.User` is not an attribute Django defines; likely
        # meant `instance.user.save()` on a related profile -- confirm.
        instance.User.save()
{"/users/views.py": ["/users/models.py"], "/films/urls.py": ["/films/views.py"], "/users/models.py": ["/users/managers.py"], "/films/admin.py": ["/films/models.py"], "/users/urls.py": ["/users/views.py"], "/films/views.py": ["/films/models.py", "/films/forms.py"], "/personal/urls.py": ["/personal/views.py"], "/personal/views.py": ["/films/models.py"], "/films/models.py": ["/users/models.py"], "/films/forms.py": ["/films/models.py"]}
33,369
NGT-Dimka/Films
refs/heads/master
/films/admin.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.contrib import admin

# Register your models here.
from films.models import Film, Producer, Comment, Genres

# Register every films-app model with the default admin site, in the same
# order the originals were registered.
for _model in (Film, Producer, Comment, Genres):
    admin.site.register(_model)
{"/users/views.py": ["/users/models.py"], "/films/urls.py": ["/films/views.py"], "/users/models.py": ["/users/managers.py"], "/films/admin.py": ["/films/models.py"], "/users/urls.py": ["/users/views.py"], "/films/views.py": ["/films/models.py", "/films/forms.py"], "/personal/urls.py": ["/personal/views.py"], "/personal/views.py": ["/films/models.py"], "/films/models.py": ["/users/models.py"], "/films/forms.py": ["/films/models.py"]}
33,370
NGT-Dimka/Films
refs/heads/master
/users/urls.py
from django.conf.urls import url

from users.views import registration, NewUserView

# Routes for the users app: profile page by primary key and the registration form.
urlpatterns = [
    url(r'^(?P<pk>\d+)/$', NewUserView.as_view(), name='user_profile'),
    url('^new_user/$', registration, name='registration'),
]
{"/users/views.py": ["/users/models.py"], "/films/urls.py": ["/films/views.py"], "/users/models.py": ["/users/managers.py"], "/films/admin.py": ["/films/models.py"], "/users/urls.py": ["/users/views.py"], "/films/views.py": ["/films/models.py", "/films/forms.py"], "/personal/urls.py": ["/personal/views.py"], "/personal/views.py": ["/films/models.py"], "/films/models.py": ["/users/models.py"], "/films/forms.py": ["/films/models.py"]}
33,371
NGT-Dimka/Films
refs/heads/master
/films/views.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.shortcuts import render_to_response
from django.views.generic import ListView, DetailView
from django.http.response import HttpResponseNotAllowed, HttpResponseBadRequest, HttpResponseRedirect
from django.contrib.contenttypes.models import ContentType

from films.models import Film
from films.forms import FilmComment


class FilmsListView(ListView):
    """All films, with the producer joined in the same query."""
    model = Film
    queryset = Film.objects.select_related('producer').all()


class GenreView(ListView):
    """Film list rendered with the shared film_list template."""
    model = Film
    # FIX: the original had a bare class-body expression
    # `Film.objects.select_related('genre').all()` whose result was discarded --
    # dead code, removed. (`genre` is a ManyToManyField, so select_related would
    # not apply to it anyway.)
    template_name = 'films/film_list.html'


class FilmDetailView(DetailView):
    """Single-film page with a pre-bound comment form and the film's comments."""
    model = Film
    queryset = Film.objects.select_related('producer')

    def get_context_data(self, **kwargs):
        context = super(FilmDetailView, self).get_context_data(**kwargs)
        # Pre-fill the hidden fields so a posted comment attaches to this film.
        context['comment_form'] = FilmComment(data={
            'object_id': self.kwargs['pk'],
            'user': self.request.user.id,
            'content_type': ContentType.objects.get_for_model(Film)
        })
        context['comments'] = self.get_object().comments
        return context


def post_film_comment(request):
    """Accept a POSTed comment, then redirect to the `next` URL from the form.

    Returns 405 for non-POST requests and 400 for an invalid form.
    """
    if request.method != "POST":
        return HttpResponseNotAllowed(permitted_methods=['POST'])
    form = FilmComment(request.POST)
    if not form.is_valid():
        return HttpResponseBadRequest()
    form.save()
    return HttpResponseRedirect(redirect_to=request.POST.get('next'))


def film_list(request):
    """Render the film list template without context (legacy function view)."""
    return render_to_response('films/film_list.html')


def film_detail(request):
    """Render the film detail template without context (legacy function view)."""
    return render_to_response('films/film_detail.html')
{"/users/views.py": ["/users/models.py"], "/films/urls.py": ["/films/views.py"], "/users/models.py": ["/users/managers.py"], "/films/admin.py": ["/films/models.py"], "/users/urls.py": ["/users/views.py"], "/films/views.py": ["/films/models.py", "/films/forms.py"], "/personal/urls.py": ["/personal/views.py"], "/personal/views.py": ["/films/models.py"], "/films/models.py": ["/users/models.py"], "/films/forms.py": ["/films/models.py"]}
33,372
NGT-Dimka/Films
refs/heads/master
/films/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-29 05:08
# NOTE: auto-generated migration -- do not hand-edit; add a new migration instead.
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Generic comment: attaches to any model via (content_type, object_id).
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField(verbose_name='Текст')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
                ('object_id', models.PositiveIntegerField(verbose_name='Идентификатор объекта')),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType', verbose_name='Тип содержимого')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Пользователь')),
            ],
            options={
                'verbose_name': 'Комментарий',
                'verbose_name_plural': 'Комментарии',
            },
        ),
        migrations.CreateModel(
            name='Film',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('poster', models.ImageField(blank=True, upload_to='posters/', verbose_name='Постер фильма')),
                ('title_film', models.CharField(max_length=255, verbose_name='Название:')),
                ('year_pub', models.DateField(verbose_name='Выход в прокат:')),
                ('budget', models.PositiveIntegerField(verbose_name='Бюджет:')),
                ('fees', models.PositiveIntegerField(verbose_name='Сборы:')),
                ('duration', models.PositiveSmallIntegerField(verbose_name='Продолжительность, мин.:')),
                ('content', models.TextField(blank=True, max_length=10000, verbose_name='Сюжет:')),
            ],
            options={
                'verbose_name': 'Фильм',
                'verbose_name_plural': 'Фильмы',
            },
        ),
        migrations.CreateModel(
            name='Genres',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('genres', models.TextField(verbose_name='Жанр')),
            ],
            options={
                'verbose_name': 'Жанр',
                'verbose_name_plural': 'Жанры',
            },
        ),
        migrations.CreateModel(
            name='Producer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('full_name', models.CharField(max_length=255, verbose_name='Полное имя')),
            ],
            options={
                'verbose_name': 'Продюсер',
                'verbose_name_plural': 'Продюсеры',
            },
        ),
        # The M2M/FK columns are added after all target models exist.
        migrations.AddField(
            model_name='film',
            name='genre',
            field=models.ManyToManyField(to='films.Genres', verbose_name='Жанр:'),
        ),
        migrations.AddField(
            model_name='film',
            name='producer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='films.Producer', verbose_name='Продюсер:'),
        ),
    ]
{"/users/views.py": ["/users/models.py"], "/films/urls.py": ["/films/views.py"], "/users/models.py": ["/users/managers.py"], "/films/admin.py": ["/films/models.py"], "/users/urls.py": ["/users/views.py"], "/films/views.py": ["/films/models.py", "/films/forms.py"], "/personal/urls.py": ["/personal/views.py"], "/personal/views.py": ["/films/models.py"], "/films/models.py": ["/users/models.py"], "/films/forms.py": ["/films/models.py"]}
33,373
NGT-Dimka/Films
refs/heads/master
/personal/urls.py
from django.conf.urls import url

from .views import ProducerDetailView

# Producer detail page, addressed by primary key.
urlpatterns = [
    url(r'^(?P<pk>\d+)/$', ProducerDetailView.as_view(), name='producer-detail'),
]
{"/users/views.py": ["/users/models.py"], "/films/urls.py": ["/films/views.py"], "/users/models.py": ["/users/managers.py"], "/films/admin.py": ["/films/models.py"], "/users/urls.py": ["/users/views.py"], "/films/views.py": ["/films/models.py", "/films/forms.py"], "/personal/urls.py": ["/personal/views.py"], "/personal/views.py": ["/films/models.py"], "/films/models.py": ["/users/models.py"], "/films/forms.py": ["/films/models.py"]}
33,374
NGT-Dimka/Films
refs/heads/master
/personal/views.py
from django.views.generic import DetailView

from films.models import Producer


# Create your views here.
class ProducerDetailView(DetailView):
    """Detail page for a single producer."""
    model = Producer
    template_name = 'producer_detail.html'
{"/users/views.py": ["/users/models.py"], "/films/urls.py": ["/films/views.py"], "/users/models.py": ["/users/managers.py"], "/films/admin.py": ["/films/models.py"], "/users/urls.py": ["/users/views.py"], "/films/views.py": ["/films/models.py", "/films/forms.py"], "/personal/urls.py": ["/personal/views.py"], "/personal/views.py": ["/films/models.py"], "/films/models.py": ["/users/models.py"], "/films/forms.py": ["/films/models.py"]}
33,375
NGT-Dimka/Films
refs/heads/master
/users/managers.py
from venv import create  # NOTE(review): unused; almost certainly an accidental auto-import.
from django.contrib.auth.base_user import BaseUserManager
from django.contrib.auth.hashers import make_password, check_password, PBKDF2PasswordHasher


class PasswordHash(PBKDF2PasswordHasher):
    # PBKDF2 hasher tagged with a custom algorithm name; wraps an existing SHA1 hash.
    algorithm = 'pbkdf2_wrapped_sha1'

    def encoded(self, sha1_hash, salt):
        # Delegates to PBKDF2PasswordHasher.encode, treating the SHA1 digest as
        # the "password" input.
        return super(PasswordHash, self).encode(sha1_hash, salt)


class UserManager(BaseUserManager):
    # Manager behind User.objects; the public entry points are create_user /
    # create_superuser, both funnelling into _create_user.

    def _create_user(self, username, password, **extra_fields):
        # NOTE(review): this method looks broken in several places; issues are
        # flagged inline but behavior is deliberately left untouched.
        if not username:
            raise ValueError('The given username must be set')
        else:
            # NOTE(review): rebinds `username` to a *model instance*, which is then
            # passed as the username field value below -- confirm intent.
            username = self.model(username=username)
        user = self.model(username=username, **extra_fields)
        user.set_password(password)
        # NOTE(review): `salt` is expected to be a string and `hasher` selects the
        # algorithm; passing the PasswordHash class as salt is suspicious.
        token = make_password(password, salt=PasswordHash, hasher='default')
        # NOTE(review): compares against the unbound method object rather than an
        # encoded hash string, so this check cannot work as intended.
        if check_password(password, PasswordHash.encoded) is not True:
            # NOTE(review): missing `raise` -- the ValueError is created and discarded.
            ValueError('Password is not correct')
        else:
            # NOTE(review): discards the user built above; Model.save() accepts no
            # username/password kwargs and returns None, so `user` ends up None.
            user = self.model(is_active=True, is_superuser=True)
            user = user.save(username=username, password=token)
        return user

    def create_user(self, username, password=None, **extra_fields):
        # Public API: regular (non-superuser) account.
        extra_fields.setdefault('is_superuser', False)
        return self._create_user(username, password, **extra_fields)

    def create_superuser(self, username, password, **extra_fields):
        # Public API: superuser account; refuses an explicit is_superuser=False.
        extra_fields.setdefault('is_superuser', True)
        if extra_fields.get('is_superuser') is not True:
            raise ValueError('Superuser must have is_superuser=True.')
        else:
            return self._create_user(username, password, **extra_fields)
{"/users/views.py": ["/users/models.py"], "/films/urls.py": ["/films/views.py"], "/users/models.py": ["/users/managers.py"], "/films/admin.py": ["/films/models.py"], "/users/urls.py": ["/users/views.py"], "/films/views.py": ["/films/models.py", "/films/forms.py"], "/personal/urls.py": ["/personal/views.py"], "/personal/views.py": ["/films/models.py"], "/films/models.py": ["/users/models.py"], "/films/forms.py": ["/films/models.py"]}
33,376
NGT-Dimka/Films
refs/heads/master
/films/models.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models
from django.http import Http404, HttpResponse
from django.contrib.contenttypes.fields import GenericRelation, GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from users.models import User


# Create your models here.
class Producer(models.Model):
    # Film producer, displayed by full name.
    full_name = models.CharField(verbose_name='Полное имя', max_length=255)

    class Meta:
        verbose_name = 'Продюсер'
        verbose_name_plural = 'Продюсеры'

    def __str__(self):
        return self.full_name


class Genres(models.Model):
    # A genre label (free text).
    genres = models.TextField(verbose_name='Жанр')

    class Meta:
        verbose_name = 'Жанр'
        verbose_name_plural = 'Жанры'

    def __str__(self):
        return self.genres


class Comment(models.Model):
    # Generic comment: attaches to any model via (content_type, object_id).
    user = models.ForeignKey(User, verbose_name='Пользователь')
    text = models.TextField(verbose_name='Текст')
    created = models.DateTimeField(verbose_name='Дата создания', auto_now_add=True)
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, verbose_name='Тип содержимого')
    object_id = models.PositiveIntegerField(verbose_name='Идентификатор объекта')
    content_object = GenericForeignKey()

    class Meta:
        verbose_name = 'Комментарий'
        verbose_name_plural = 'Комментарии'


class Film(models.Model):
    # Core film record: poster, credits, money figures, and generic comments.
    poster = models.ImageField(upload_to="posters/", verbose_name="Постер фильма", blank=True)
    title_film = models.CharField(max_length=255, verbose_name='Название:')
    year_pub = models.DateField(verbose_name='Выход в прокат:')
    genre = models.ManyToManyField(Genres, verbose_name='Жанр:')
    producer = models.ForeignKey(Producer, verbose_name='Продюсер:')
    budget = models.PositiveIntegerField(verbose_name='Бюджет:')
    fees = models.PositiveIntegerField(verbose_name='Сборы:')
    duration = models.PositiveSmallIntegerField(verbose_name='Продолжительность, мин.:')
    content = models.TextField(blank=True, max_length=10000, verbose_name='Сюжет:')
    comments = GenericRelation(Comment)

    class Meta:
        verbose_name = 'Фильм'
        verbose_name_plural = 'Фильмы'

    @property
    def __unicode__(self):
        # NOTE(review): a dunder __unicode__ defined as a @property is unusual;
        # Django 1.x expects a plain method -- confirm this is ever called.
        return self.title_film

    @property
    def get_absolute_url(self):
        # Canonical URL for this film's detail page.
        return "/films/%i/" % self.id

    @staticmethod
    def film():
        # NOTE(review): looks like leftover scaffolding -- the try body is `pass`,
        # so DoesNotExist can never trigger, and the concatenation below uses class
        # attributes (Field objects), not instance values.
        try:
            pass
        except Film.DoesNotExist:
            raise Http404
        s = Film.title_film + "<br><br>" + Film.genre
        return HttpResponse(s)

    def __str__(self):
        return self.title_film
{"/users/views.py": ["/users/models.py"], "/films/urls.py": ["/films/views.py"], "/users/models.py": ["/users/managers.py"], "/films/admin.py": ["/films/models.py"], "/users/urls.py": ["/users/views.py"], "/films/views.py": ["/films/models.py", "/films/forms.py"], "/personal/urls.py": ["/personal/views.py"], "/personal/views.py": ["/films/models.py"], "/films/models.py": ["/users/models.py"], "/films/forms.py": ["/films/models.py"]}
33,377
NGT-Dimka/Films
refs/heads/master
/films/forms.py
from django import forms

from films.models import Comment


class FilmComment(forms.ModelForm):
    """ModelForm over Comment; the linkage fields ride along as hidden inputs."""

    class Meta:
        model = Comment
        exclude = []
        widgets = {
            'user': forms.HiddenInput(),
            'content_type': forms.HiddenInput(),
            'object_id': forms.HiddenInput()
        }
{"/users/views.py": ["/users/models.py"], "/films/urls.py": ["/films/views.py"], "/users/models.py": ["/users/managers.py"], "/films/admin.py": ["/films/models.py"], "/users/urls.py": ["/users/views.py"], "/films/views.py": ["/films/models.py", "/films/forms.py"], "/personal/urls.py": ["/personal/views.py"], "/personal/views.py": ["/films/models.py"], "/films/models.py": ["/users/models.py"], "/films/forms.py": ["/films/models.py"]}
33,378
quantapix/qnarre
refs/heads/main
/qnarre/prep/convert/xlnet.py
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

import numpy as np
import tensorflow as tf
import torch

from argparse import ArgumentParser
from os.path import abspath, join
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging

from ..config.xlnet import PreTrained
from ...models.xlnet import ForQA, ForSeqClass, LMHead

# Label counts for each supported GLUE fine-tuning task.
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()

log = logging.get_logger(__name__)


def build_map(model, cfg, tf_weights=None):
    # Build the mapping from TF checkpoint variable names to PyTorch parameters
    # (or lists of parameters for per-layer stacked variables).
    tf_to_pt_map = {}
    if hasattr(model, "transformer"):
        # Head-specific parameters first, then descend into the transformer body.
        if hasattr(model, "lm_loss"):
            tf_to_pt_map["model/lm_loss/bias"] = model.lm_loss.bias
        # NOTE: "sequnece_summary" reproduces the typo present in the TF checkpoints.
        if (
            hasattr(model, "sequence_summary")
            and "model/sequnece_summary/summary/kernel" in tf_weights
        ):
            tf_to_pt_map[
                "model/sequnece_summary/summary/kernel"
            ] = model.sequence_summary.summary.weight
            tf_to_pt_map[
                "model/sequnece_summary/summary/bias"
            ] = model.sequence_summary.summary.bias
        if (
            hasattr(model, "logits_proj")
            and cfg.finetune is not None
            and f"model/regression_{cfg.finetune}/logit/kernel" in tf_weights
        ):
            tf_to_pt_map[f"model/regression_{cfg.finetune}/logit/kernel"] = model.logits_proj.weight
            tf_to_pt_map[f"model/regression_{cfg.finetune}/logit/bias"] = model.logits_proj.bias
        model = model.transformer
    # Embeddings shared by every configuration.
    tf_to_pt_map.update(
        {
            "model/transformer/word_embedding/lookup_table": model.word_embedding.weight,
            "model/transformer/mask_emb/mask_emb": model.mask_emb,
        }
    )
    # Per-layer attention and feed-forward parameters.
    for i, b in enumerate(model.layer):
        layer_str = f"model/transformer/layer_{i}/"
        tf_to_pt_map.update(
            {
                layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight,
                layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias,
                layer_str + "rel_attn/o/kernel": b.rel_attn.o,
                layer_str + "rel_attn/q/kernel": b.rel_attn.q,
                layer_str + "rel_attn/k/kernel": b.rel_attn.k,
                layer_str + "rel_attn/r/kernel": b.rel_attn.r,
                layer_str + "rel_attn/v/kernel": b.rel_attn.v,
                layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight,
                layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias,
                layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight,
                layer_str + "ff/layer_1/bias": b.ff.layer_1.bias,
                layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight,
                layer_str + "ff/layer_2/bias": b.ff.layer_2.bias,
            }
        )
    # Relative-attention biases: one entry per layer when untied, else a single
    # shared tensor wrapped in a one-element list.
    if cfg.untie_r:
        r_r_list = []
        r_w_list = []
        r_s_list = []
        seg_embed_list = []
        for b in model.layer:
            r_r_list.append(b.rel_attn.r_r_bias)
            r_w_list.append(b.rel_attn.r_w_bias)
            r_s_list.append(b.rel_attn.r_s_bias)
            seg_embed_list.append(b.rel_attn.seg_embed)
    else:
        r_r_list = [model.r_r_bias]
        r_w_list = [model.r_w_bias]
        r_s_list = [model.r_s_bias]
        seg_embed_list = [model.seg_embed]
    tf_to_pt_map.update(
        {
            "model/transformer/r_r_bias": r_r_list,
            "model/transformer/r_w_bias": r_w_list,
            "model/transformer/r_s_bias": r_s_list,
            "model/transformer/seg_embed": seg_embed_list,
        }
    )
    return tf_to_pt_map


def load_src_weights(model, config, src_path):
    # Copy every mapped TF checkpoint variable into the PyTorch model in place.
    init_vars = tf.train.list_variables(src_path)
    tf_weights = {}
    for name, shape in init_vars:
        log.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(src_path, name)
        tf_weights[name] = array
    tf_to_pt_map = build_map(model, config, tf_weights)
    for name, p in tf_to_pt_map.items():
        log.info(f"Importing {name}")
        if name not in tf_weights:
            log.info(f"{name} not in tf pre-trained weights, skipping")
            continue
        array = tf_weights[name]
        # Dense kernels are stored transposed in the TF checkpoint.
        if "kernel" in name and ("ff" in name or "summary" in name or "logit" in name):
            log.info("Transposing")
            array = np.transpose(array)
        if isinstance(p, list):
            # Per-layer stacked variable: first axis indexes the layer.
            assert (
                len(p) == array.shape[0]
            ), f"Pointer length {len(p)} and array length {array.shape[0]} mismatched"
            for i, p_i in enumerate(p):
                arr_i = array[i, ...]
                assert p_i.shape == arr_i.shape
                p_i.data = torch.from_numpy(arr_i)
        else:
            assert p.shape == array.shape
            p.data = torch.from_numpy(array)
        # Drop the copied weight and its Adam slots so leftovers can be reported.
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/Adam", None)
        tf_weights.pop(name + "/Adam_1", None)
    log.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model


def to_pytorch(src_path, bert_config_file, save_path, finetune=None):
    # Convert a TF XLNet checkpoint to a PyTorch weights + config pair, choosing
    # the head class from the fine-tuning task name (GLUE / squad / plain LM).
    cfg = PreTrained.from_json_file(bert_config_file)
    print(f"Building from config: {cfg}")
    finetune = finetune.lower() if finetune is not None else ""
    if finetune in GLUE_TASKS_NUM_LABELS:
        cfg.finetune = finetune
        cfg.n_labels = GLUE_TASKS_NUM_LABELS[finetune]
        m = ForSeqClass(cfg)
    elif "squad" in finetune:
        cfg.finetune = finetune
        m = ForQA(cfg)
    else:
        m = LMHead(cfg)
    load_src_weights(m, cfg, src_path)
    w = join(save_path, WEIGHTS_NAME)
    print(f"Saving to: {abspath(w)}")
    torch.save(m.state_dict(), w)
    c = join(save_path, CONFIG_NAME)
    print(f"Saving config to: {abspath(c)}")
    with open(c, "w", encoding="utf-8") as f:
        f.write(cfg.to_json_string())


if __name__ == "__main__":
    x = ArgumentParser()
    x.add_argument("--src_path", default=None, type=str, required=True)
    x.add_argument("--cfg_path", default=None, type=str, required=True)
    x.add_argument("--save_path", default=None, type=str, required=True)
    x.add_argument("--finetune", default=None, type=str)
    y = x.parse_args()
    to_pytorch(y.src_path, y.cfg_path, y.save_path, y.finetune)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,379
quantapix/qnarre
refs/heads/main
/qnarre/prep/convert/bert.py
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Convert a TensorFlow BERT checkpoint into a PyTorch state dict."""

import numpy as np
import re
import tensorflow as tf
import torch

from argparse import ArgumentParser
from os.path import abspath

from transformers.utils import logging

from ..config.bert import PreTrained
from ...models.bert import ForPreTraining

logging.set_verbosity_info()

log = logging.get_logger(__name__)

# TF variable-name segments that carry optimizer state or bookkeeping,
# not model weights; any variable containing one of these is skipped.
_SKIP = [
    "adam_v",
    "adam_m",
    "AdamWeightDecayOptimizer",
    "AdamWeightDecayOptimizer_1",
    "global_step",
]


def load_src_weights(model, src_path):
    """Copy every weight from the TF checkpoint at `src_path` into `model`.

    Mutates `model` in place and returns it. Raises AssertionError when the
    checkpoint is empty or a TF tensor's shape disagrees with the torch
    parameter it maps to.
    """
    src_path = abspath(src_path)
    log.info(f"Loading from: {src_path}")
    xs = tf.train.list_variables(src_path)
    assert len(xs) > 0
    ns, ws = _load_weights(xs, src_path)
    for n in ns:
        # BUG FIX: the original did `for n, w in zip(ns, ws)` where `ws` is a
        # dict — iterating a dict yields its KEYS, so `w` was the variable
        # name string, not the numpy array. Look the array up explicitly.
        w = ws[n]
        ss = n.split("/")
        if any(x in _SKIP for x in ss):
            log.info(f"Skipping {'/'.join(ss)}")
            continue
        p = _resolve_pointer(model, ss)
        if p is None:
            # A scope segment has no torch counterpart; skip the WHOLE
            # variable (the original `continue` only skipped one segment and
            # kept walking with a stale pointer, contradicting its own log).
            log.info(f"Skipping {'/'.join(ss)}")
            continue
        s = ss[-1]
        if s[-11:] == "_embeddings":
            p = getattr(p, "weight")
        elif s == "kernel":
            # TF stores dense kernels transposed relative to torch Linear.
            w = np.transpose(w)
        assert p.shape == w.shape
        p.data = torch.from_numpy(w)
    return model


def _resolve_pointer(model, ss):
    """Walk `model` along TF scope segments `ss`.

    Returns the torch parameter/module the segments address, or None when a
    segment has no attribute counterpart on the model.
    """
    p = model
    for s in ss:
        # Names like "layer_3" index into a module list: split into
        # ["layer", "3", ""] so scopes[1] selects the list element.
        if re.fullmatch(r"[A-Za-z]+_\d+", s):
            scopes = re.split(r"_(\d+)", s)
        else:
            scopes = [s]
        # Map TF naming conventions onto torch attribute names.
        if scopes[0] in ("kernel", "gamma", "output_weights"):
            p = getattr(p, "weight")
        elif scopes[0] in ("output_bias", "beta"):
            p = getattr(p, "bias")
        elif scopes[0] == "squad":
            p = getattr(p, "classifier")
        else:
            try:
                p = getattr(p, scopes[0])
            except AttributeError:
                return None
        if len(scopes) >= 2:
            p = p[int(scopes[1])]
    return p


def _load_weights(xs, src_path):
    """Load each listed TF variable; return (names, name -> ndarray dict)."""
    ns = []
    ws = {}
    for n, shape in xs:
        log.info(f"Loading TF weight {n} with shape {shape}")
        ns.append(n)
        ws[n] = tf.train.load_variable(src_path, n)
    return ns, ws


def to_pytorch(src_path, cfg_path, save_path):
    """Build a model from `cfg_path`, fill it from the TF checkpoint, save it."""
    cfg = PreTrained.from_json_file(cfg_path)
    print(f"Building from config: {cfg}")
    m = ForPreTraining(cfg)
    load_src_weights(m, src_path)
    print(f"Saving to: {save_path}")
    torch.save(m.state_dict(), save_path)


if __name__ == "__main__":
    x = ArgumentParser()
    x.add_argument("--src_path", default=None, type=str, required=True)
    x.add_argument("--cfg_path", default=None, type=str, required=True)
    x.add_argument("--save_path", default=None, type=str, required=True)
    y = x.parse_args()
    to_pytorch(y.src_path, y.cfg_path, y.save_path)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,380
quantapix/qnarre
refs/heads/main
/qnarre/try/add.py
# %%
import torch
import triton
import triton.language as tl


@triton.jit
def add_kernel(x1_ptr, x2_ptr, y_ptr, n, BLOCK: tl.constexpr):
    """Element-wise add: y[i] = x1[i] + x2[i] for i < n, one BLOCK per program."""
    pid = tl.program_id(axis=0)
    offs = pid * BLOCK + tl.arange(0, BLOCK)
    # Mask the tail of the last block so out-of-range lanes neither load nor store.
    mask = offs < n
    x1 = tl.load(x1_ptr + offs, mask=mask)
    x2 = tl.load(x2_ptr + offs, mask=mask)
    y = x1 + x2
    tl.store(y_ptr + offs, y, mask=mask)


# %%
def add(x1: torch.Tensor, x2: torch.Tensor):
    """Return x1 + x2 computed on the GPU by `add_kernel`.

    Both inputs must be CUDA tensors of the same shape/dtype.
    """
    y = torch.empty_like(x1)
    # Raise instead of assert: assertions vanish under `python -O`, which
    # would let a CPU tensor reach the kernel launch and fail obscurely.
    if not (x1.is_cuda and x2.is_cuda and y.is_cuda):
        raise ValueError("add() requires CUDA tensors")
    n = y.numel()

    def grid(meta):
        # 1-D launch grid: enough programs of BLOCK elements each to cover n.
        return (triton.cdiv(n, meta["BLOCK"]),)

    add_kernel[grid](x1, x2, y, n, BLOCK=1024)
    return y


# %%
# Smoke test: compare the Triton result against torch's native add.
torch.manual_seed(0)
size = 98432
x1 = torch.rand(size, device="cuda")
x2 = torch.rand(size, device="cuda")
y_ref = x1 + x2
y_triton = add(x1, x2)
print(f"ref={y_ref}")
print(f"triton={y_triton}")
print(
    f"The maximum difference between ref and triton is "
    f"{torch.max(torch.abs(y_ref - y_triton))}"
)


# %%
@triton.testing.perf_report(
    triton.testing.Benchmark(
        x_names=["size"],
        x_vals=[2**i for i in range(12, 28, 1)],
        x_log=True,
        line_arg="provider",
        line_vals=["triton", "torch"],
        line_names=["Triton", "Torch"],
        styles=[("blue", "-"), ("green", "-")],
        ylabel="GB/s",
        plot_name="vector-add-performance",
        args={},
    )
)
def benchmark(size, provider):
    """Measure memory throughput (GB/s) of `add` vs native torch addition.

    Returns (median, low, high) throughput derived from the (0.5, 0.2, 0.8)
    time quantiles reported by `do_bench`.
    """
    x1 = torch.rand(size, device="cuda", dtype=torch.float32)
    x2 = torch.rand(size, device="cuda", dtype=torch.float32)
    qs = [0.5, 0.2, 0.8]
    # Renamed from `min`/`max` in the original: never shadow builtins.
    ms = ms_lo = ms_hi = None
    if provider == "torch":
        ms, ms_lo, ms_hi = triton.testing.do_bench(lambda: x1 + x2, quantiles=qs)
    if provider == "triton":
        ms, ms_lo, ms_hi = triton.testing.do_bench(lambda: add(x1, x2), quantiles=qs)

    def gbps(t_ms):
        # 3 float32 tensors touched (2 reads + 1 write), 4 bytes each:
        # 12 * size bytes, divided by milliseconds, scaled to GB/s.
        return 12 * size / t_ms * 1e-6

    # Slowest time -> lowest throughput, fastest time -> highest,
    # matching the original return order (median, low, high).
    return gbps(ms), gbps(ms_hi), gbps(ms_lo)


# %%
benchmark.run(print_data=True, show_plots=True)

# %%
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,381
quantapix/qnarre
refs/heads/main
/qnarre/base/doc/patcher.py
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

# Text fixing and patching utilities: regex-based `Fixer`s for one-off text
# repairs, and a `Patcher` that records a unified diff between two texts and
# can re-apply it line by line.

import re
import collections as co

from difflib import unified_diff

from .log import Logger
from .base import config
from .resource import Resource
from .nominals import flags, para_join, para_split

log = Logger(__name__)

# Default fix patterns: each entry is a (pattern, replacement) pair.  This one
# collapses runs of 'xx'/'xxx' placeholders in front of mail-header labels
# (Date/Sent/To/Cc/Bcc/Subject) down to a single run.
fixes = ((r'(?P<lf>xxx?){2,}(?P<rt> ?(Date|Sent|To|Cc|Bcc|Subject): )',
          r'\g<lf>\g<rt>'), )


class Fixer:
    """Applies a sequence of regex substitutions to text."""

    def __init__(self, fixes=(), **kw):
        super().__init__(**kw)
        self.fixes = fixes
        # Pre-compile once; `flags` is an inline-flags prefix string imported
        # from .nominals (presumably e.g. '(?i)' — confirm against that module).
        self.re_fixes = tuple((re.compile(flags + p), r) for p, r in fixes)

    def __repr__(self):
        return '{}({!r})'.format(type(self).__name__, self.fixes)

    def fix(self, txt):
        """Run all substitutions over `txt`.

        A tuple argument is treated as pre-split paragraphs: it is joined,
        fixed as one string, and split back.
        """
        if isinstance(txt, tuple):
            return para_split(self.fix(para_join(txt)))
        for p, r in self.re_fixes:
            txt = p.sub(r, txt)
        return txt


class Fixers(Resource):
    # Persisted registry of Fixer instances (see Resource for load/save).
    _res_path = config.qnar_dst + 'fixers.qnr'

    @classmethod
    def globals(cls):
        # Expose this module's namespace so Resource can resolve class names
        # when deserializing.
        return globals()


# Matches a unified-diff hunk header: '@@ -start[,count] +start[,count] @@'.
chunk_re = re.compile(r'^@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@', re.ASCII)


class Chunk(co.namedtuple('Chunk', 'src tgt lns')):
    """One diff hunk: `src`/`tgt` are 1-based line ranges, `lns` the '+/-/ '
    prefixed hunk body lines."""

    def applier(self, si, ti, src):
        """Generator that applies this hunk.

        `si`/`ti` are the current source/target line numbers; `src` is an
        enumerated iterator of source lines.  Yields the patched lines and
        returns (via StopIteration.value) the source line number reached.
        """
        ss = self.src.start
        # Copy unchanged source lines up to where the hunk starts.
        while si < ss:
            i, sl = next(src)
            assert i == si
            yield sl
            si += 1
            ti += 1
        assert si == self.src.start and ti == self.tgt.start
        for l in self.lns:
            if l.startswith(('-', ' ')):
                # '-' and ' ' lines must match the source exactly.
                i, sl = next(src)
                assert i == si and l[1:] == sl
                si += 1
                if l.startswith('-'):
                    continue  # deletion: consume the source line, emit nothing
            else:
                assert l.startswith('+')
            # Reached for context (' ') and insertion ('+') lines alike.
            yield l[1:]
            ti += 1
        assert si == self.src.stop and ti == self.tgt.stop
        return si


def diff_parser(udiff):
    """Parse a unified diff (iterable of lines) into `Chunk`s.

    Tracks running source/target counters and asserts each hunk's body is
    consistent with its header ranges before yielding it.
    """
    c = ls = lt = None
    for ln in udiff:
        # Diff bodies may contain empty lines; treat them as blank context.
        ln = ln or ' '
        m = chunk_re.match(ln)
        if m:
            if c:
                # Finish the previous hunk, freezing its body lines.
                assert ls == c.src.stop and lt == c.tgt.stop
                yield c._replace(lns=tuple(c.lns))
            c = []
            # Groups 1/3 are the source start/count, 4/6 the target's;
            # a missing count defaults to 1 per the unified-diff format.
            for i in range(0, 4, 3):
                s = int(m.group(i + 1))
                n = int(m.group(i + 3)) if m.group(i + 3) else 1
                c.append(range(s, s + n))
            c = Chunk(*c, [])
            ls, lt = c.src.start, c.tgt.start
            continue
        elif c:
            # Inside a hunk body: advance counters per line prefix.
            if ln.startswith('-'):
                ls += 1
            elif ln.startswith('+'):
                lt += 1
            else:
                assert ln.startswith(' ')
                ls += 1
                lt += 1
            c.lns.append(ln)
    if c:
        # Flush the final hunk.
        assert ls == c.src.stop and lt == c.tgt.stop
        yield c._replace(lns=tuple(c.lns))


class Patcher:
    """Stores the hunks of a src->dst diff and re-applies them to text."""

    @classmethod
    def create(cls, src, dst):
        """Build a Patcher from the unified diff of two strings."""
        ud = unified_diff(src.splitlines(), dst.splitlines())
        cs = tuple(c for c in diff_parser(ud))
        return cls(cs)

    def __init__(self, chunks):
        super().__init__()
        self.chunks = chunks

    def __eq__(self, other):
        if isinstance(other, type(self)):
            return self.chunks == other.chunks
        return NotImplemented

    def __repr__(self):
        return '{}({!r})'.format(type(self).__name__, self.chunks)

    def patch(self, txt):
        """Apply the stored hunks to `txt`, returning the patched string.

        A tuple argument is paragraph-joined, patched, and split back, as in
        Fixer.fix.
        """
        if isinstance(txt, tuple):
            return para_split(self.patch(para_join(txt)))
        r = []
        si = ti = 1  # diff line numbers are 1-based
        s = enumerate(txt.splitlines(), start=si)
        for c in self.chunks:
            a = c.applier(si, ti, s)
            while True:
                try:
                    r.append(next(a))
                except StopIteration as e:
                    # The generator's return value is the source line number
                    # it stopped at; resume the next hunk from there.
                    si = e.value
                    break
            ti = len(r) + 1
        # Copy any source lines remaining after the last hunk.
        for _, l in s:
            r.append(l)
        return '\n'.join(r)


class Patchers(Resource):
    # Persisted registry of Patcher instances (see Resource).
    _res_path = config.qnar_dst + 'patchers.qnr'

    @classmethod
    def globals(cls):
        return globals()
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,382
quantapix/qnarre
refs/heads/main
/qnarre/prep/convert/funnel.py
# Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import torch import re import numpy as np import tensorflow as tf from argparse import ArgumentParser from os.path import abspath from transformers.utils import logging from ..config.funnel import PreTrained from ...models.funnel import Base, Model logging.set_verbosity_info() log = logging.get_logger(__name__) def load_src_weights(model, config, tf_checkpoint_path): tf_path = abspath(tf_checkpoint_path) log.info(f"Converting TensorFlow checkpoint from {tf_path}") init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: log.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) _layer_map = { "k": "k_head", "q": "q_head", "v": "v_head", "o": "post_proj", "layer_1": "linear_1", "layer_2": "linear_2", "rel_attn": "attention", "ff": "ffn", "kernel": "weight", "gamma": "weight", "beta": "bias", "lookup_table": "weight", "word_embedding": "word_embeddings", "input": "embeddings", } for name, array in zip(names, arrays): name = name.split("/") if any( n in [ "adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step", ] for n in name ): log.info(f"Skipping {'/'.join(name)}") continue if name[0] == "generator": continue pointer = model skipped = False 
for m_name in name[1:]: if not isinstance(pointer, FunnelPositionwiseFFN) and re.fullmatch( r"layer_\d+", m_name ): layer_index = int(re.search(r"layer_(\d+)", m_name).groups()[0]) if layer_index < config.n_lays: block_idx = 0 while layer_index >= config.block_sizes[block_idx]: layer_index -= config.block_sizes[block_idx] block_idx += 1 pointer = pointer.blocks[block_idx][layer_index] else: layer_index -= config.n_lays pointer = pointer.layers[layer_index] elif m_name == "r" and isinstance(pointer, FunnelRelMultiheadAttention): pointer = pointer.r_kernel break elif m_name in _layer_map: pointer = getattr(pointer, _layer_map[m_name]) else: try: pointer = getattr(pointer, m_name) except AttributeError: print(f"Skipping {'/'.join(name)}", array.shape) skipped = True break if not skipped: if len(pointer.shape) != len(array.shape): array = array.reshape(pointer.shape) if m_name == "kernel": array = np.transpose(array) pointer.data = torch.from_numpy(array) return model def to_pytorch(src_path, cfg_path, save_path, base): cfg = PreTrained.from_json_file(cfg_path) print(f"Building from config: {cfg}") m = Base(cfg) if base else Model(cfg) load_src_weights(m, cfg, src_path) print(f"Saving to: {save_path}") torch.save(m.state_dict(), save_path) if __name__ == "__main__": x = ArgumentParser() x.add_argument("--src_path", default=None, type=str, required=True) x.add_argument("--cfg_path", default=None, type=str, required=True) x.add_argument("--save_path", default=None, type=str, required=True) x.add_argument("--base", action="store_true") y = x.parse_args() to_pytorch(y.src_path, y.cfg_path, y.save_path, y.base)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,383
quantapix/qnarre
refs/heads/main
/tools/triton/python/test/unit/runtime/test_cache.py
"""Tests for triton's JIT compilation cache.

Covers: cache-key invalidation on source changes (top-level and nested),
on-disk cache reuse via ``JITFunction.cache_hook``, argument
specialization, constexpr argument validation, ``warmup`` caching,
debug-mode recompilation, and the ``noinline`` attribute.
"""

import os
import shutil

import pytest
import torch

import triton
import triton.language as tl
from triton.runtime.jit import JITFunction

# Directory used as an isolated on-disk cache for the reuse tests.
tmpdir = ".tmp"


# NOTE: the `i = i + 1` bodies below are load-bearing: apply_src_change()
# rewrites them with a literal string replace, so do not reformat them.
@triton.jit
def function_1(i):
    i = i + 1
    i = function_2(i)
    return i


@triton.jit
def function_2(i):
    i = i + 1
    return i


@triton.jit
def kernel(X, i, BLOCK: tl.constexpr):
    i = i + 1
    i = function_1(i)
    tl.store(X, i)


@triton.jit(do_not_specialize=["i"])
def kernel_nospec(X, i, BLOCK: tl.constexpr):
    i = i + 1
    i = function_1(i)
    tl.store(X, i)


def apply_src_change(target, old, new):
    """Temporarily rewrite `old` -> `new` in the JIT sources and return
    `target`'s resulting cache key.

    Hashes are reset first so the key is recomputed; `target.src` is
    restored before returning.
    """
    kernel.hash = None
    function_1.hash = None
    function_2.hash = None
    function_1.src = function_1.src.replace(old, new)
    target.src = target.src.replace(old, new)
    key = target.cache_key
    target.src = target.src.replace(new, old)
    return key


def test_nochange():
    """A no-op source rewrite must leave the cache key unchanged."""
    before = kernel.cache_key
    after = apply_src_change(kernel, 'i + 1', 'i + 1')
    assert before == after


def test_toplevel_change():
    """Editing the kernel's own source must change its cache key."""
    before = kernel.cache_key
    after = apply_src_change(kernel, 'i + 1', 'i + 2')
    assert before != after


def test_nested1_change():
    """Editing a function the kernel calls must also change the key."""
    before = kernel.cache_key
    after = apply_src_change(function_1, 'i + 1', 'i + 2')
    assert before != after


def reset_tmp_dir():
    """Point the triton cache at `tmpdir` and wipe any previous contents."""
    os.environ["TRITON_CACHE_DIR"] = tmpdir
    if os.path.exists(tmpdir):
        shutil.rmtree(tmpdir)


def test_reuse():
    """Repeated identical launches must compile exactly once."""
    hits = 0

    def count_hook(*args, **kwargs):
        nonlocal hits
        hits += 1

    JITFunction.cache_hook = count_hook
    reset_tmp_dir()
    x = torch.empty(1, dtype=torch.int32, device='cuda')
    for _ in range(10):
        kernel[(1,)](x, 1, BLOCK=1024)
    assert hits == 1


@pytest.mark.parametrize('mode', ['enable', 'disable'])
def test_specialize(mode):
    """With specialization enabled, distinct scalar values (1 / powers of
    two / other) trigger distinct compilations; with do_not_specialize,
    one compilation serves them all."""
    hits = 0

    def count_hook(*args, **kwargs):
        nonlocal hits
        hits += 1

    JITFunction.cache_hook = count_hook
    reset_tmp_dir()
    x = torch.empty(1, dtype=torch.int32, device='cuda')
    fn = {'enable': kernel, 'disable': kernel_nospec}[mode]
    expected = {'enable': 3, 'disable': 1}[mode]
    for val in [1, 2, 4, 8, 16, 32]:
        fn[(1,)](x, val, BLOCK=512)
    assert hits == expected


def test_constexpr_not_callable() -> None:
    """A plain-value constexpr is accepted; a callable constexpr raises."""
    @triton.jit
    def kernel(X, c: tl.constexpr):
        tl.store(X, 2)

    x = torch.empty(1, dtype=torch.int32, device='cuda')
    error = False
    try:
        kernel[(1,)](x, c="str")
    except BaseException:
        error = True
    assert error is False
    # A callable (here tl.abs) is not a valid constexpr value.
    try:
        kernel[(1,)](x, c=tl.abs)
    except BaseException:
        error = True
    assert error is True


def test_jit_warmup_cache() -> None:
    """warmup() populates the cache once; equivalent signatures reuse it."""
    @triton.jit
    def kernel_add(a, b, o, N: tl.constexpr):
        idx = tl.arange(0, N)
        tl.store(o + idx, tl.load(a + idx) + tl.load(b + idx))

    args = [
        torch.randn(32, dtype=torch.float32, device="cuda"),
        torch.randn(32, dtype=torch.float32, device="cuda"),
        torch.randn(32, dtype=torch.float32, device="cuda"),
        32,
    ]
    assert len(kernel_add.cache) == 0
    # Warming up by dtype and warming up by concrete tensors must land in
    # the same cache slot.
    kernel_add.warmup(torch.float32, torch.float32, torch.float32, 32, grid=(1,))
    assert len(kernel_add.cache) == 1
    kernel_add.warmup(*args, grid=(1,))
    assert len(kernel_add.cache) == 1
    kernel_add.warmup(*args, grid=(1,))
    assert len(kernel_add.cache) == 1


def test_jit_debug() -> None:
    """Toggling .debug forces a recompile with different TTIR."""
    @triton.jit
    def kernel_add(a, b, o, N: tl.constexpr):
        idx = tl.arange(0, N)
        tl.device_assert(idx < 32, "idx < 32")
        tl.store(o + idx, tl.load(a + idx) + tl.load(b + idx))

    device = torch.cuda.current_device()
    assert len(kernel_add.cache[device]) == 0
    kernel_add.warmup(torch.float32, torch.float32, torch.float32, 32, grid=(1,))
    assert len(kernel_add.cache[device]) == 1
    # Same debug setting -> cache hit; flipped setting -> new entry.
    kernel_add.debug = False
    kernel_add.warmup(torch.float32, torch.float32, torch.float32, 32, grid=(1,))
    assert len(kernel_add.cache[device]) == 1
    kernel_add.debug = True
    kernel_add.warmup(torch.float32, torch.float32, torch.float32, 32, grid=(1,))
    assert len(kernel_add.cache[device]) == 2
    compiled = list(kernel_add.cache[device].values())
    assert compiled[0].asm['ttir'] != compiled[1].asm['ttir']


@triton.jit
def add_fn(a, b, o, N: tl.constexpr):
    idx = tl.arange(0, N)
    tl.store(o + idx, tl.load(a + idx) + tl.load(b + idx))


def test_jit_noinline() -> None:
    """Marking a callee .noinline must produce different TTIR for the caller."""
    @triton.jit
    def kernel_add_device(a, b, o, N: tl.constexpr):
        add_fn(a, b, o, N)

    device = torch.cuda.current_device()
    assert len(kernel_add_device.cache[device]) == 0
    kernel_add_device.warmup(torch.float32, torch.float32, torch.float32, 32, grid=(1,))
    assert len(kernel_add_device.cache[device]) == 1
    compiled = list(kernel_add_device.cache[device].values())
    inline_ttir = compiled[0].asm['ttir']
    # Recompile with the callee forced out-of-line.
    add_fn.noinline = True
    add_fn.hash = None
    kernel_add_device.hash = None
    kernel_add_device.cache[device].clear()
    kernel_add_device.warmup(torch.float32, torch.float32, torch.float32, 32, grid=(1,))
    assert len(kernel_add_device.cache[device]) == 1
    compiled = list(kernel_add_device.cache[device].values())
    noinline_ttir = compiled[0].asm['ttir']
    assert inline_ttir != noinline_ttir


def test_memory_leak() -> None:
    # NOTE(review): only the kernel definition is visible in this chunk;
    # the leak-detection driver presumably follows in the full file.
    @triton.jit
    def kernel(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
        xnumel = 10
        xoffset = tl.program_id(0) * XBLOCK
        xindex = xoffset + tl.arange(0, XBLOCK)[:]
        xmask = xindex < xnumel
        x0 = xindex
        tmp0 = tl.load(in_ptr0 + (x0), xmask)
        tl.store(out_ptr0 + (x0 + tl.zeros([XBLOCK], tl.int32)), tmp0, xmask)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,384
quantapix/qnarre
refs/heads/main
/qnarre/prep/convert/convbert.py
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Convert an original TF1 ConvBERT checkpoint to PyTorch (and back to TF2)."""

import argparse
import os
from operator import attrgetter

import tensorflow as tf
import torch

# NOTE: the original file also imported `load_tf_weights_in_convbert` from
# transformers, which was immediately shadowed by the local definition below;
# that dead import has been dropped.
from transformers import ConvBertConfig, ConvBertModel, TFConvBertModel
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_tf_weights_in_convbert(model, config, tf_checkpoint_path):
    """Load weights from a TF1 ConvBERT checkpoint into a PyTorch model.

    Args:
        model: the target ``ConvBertModel`` whose parameters are overwritten
            in place.
        config: the model config; ``config.n_groups`` selects grouped vs plain
            dense variable names, ``config.n_lays`` is the encoder depth.
        tf_checkpoint_path: path to the TF1 checkpoint to read.

    Returns:
        The same ``model`` instance with weights loaded.
    """
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Read every variable of the checkpoint into memory up front.
    init_vars = tf.train.list_variables(tf_path)
    tf_data = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        tf_data[name] = tf.train.load_variable(tf_path, name)

    # Non-per-layer parameters: PyTorch parameter name -> TF variable name.
    param_mapping = {
        "embeddings.word_embeddings.weight": "electra/embeddings/word_embeddings",
        "embeddings.position_embeddings.weight": "electra/embeddings/position_embeddings",
        "embeddings.token_type_embeddings.weight": "electra/embeddings/token_type_embeddings",
        "embeddings.LayerNorm.weight": "electra/embeddings/LayerNorm/gamma",
        "embeddings.LayerNorm.bias": "electra/embeddings/LayerNorm/beta",
        "embeddings_project.weight": "electra/embeddings_project/kernel",
        "embeddings_project.bias": "electra/embeddings_project/bias",
    }
    # Grouped linear layers are stored under "g_dense" in the TF checkpoint.
    group_dense_name = "g_dense" if config.n_groups > 1 else "dense"

    # (PyTorch suffix, TF suffix) pairs repeated for every encoder layer.
    per_layer = [
        ("attention.self.query.weight", "attention/self/query/kernel"),
        ("attention.self.query.bias", "attention/self/query/bias"),
        ("attention.self.key.weight", "attention/self/key/kernel"),
        ("attention.self.key.bias", "attention/self/key/bias"),
        ("attention.self.value.weight", "attention/self/value/kernel"),
        ("attention.self.value.bias", "attention/self/value/bias"),
        ("attention.self.key_conv_attn_layer.depthwise.weight", "attention/self/conv_attn_key/depthwise_kernel"),
        ("attention.self.key_conv_attn_layer.pointwise.weight", "attention/self/conv_attn_key/pointwise_kernel"),
        ("attention.self.key_conv_attn_layer.bias", "attention/self/conv_attn_key/bias"),
        ("attention.self.conv_kernel_layer.weight", "attention/self/conv_attn_kernel/kernel"),
        ("attention.self.conv_kernel_layer.bias", "attention/self/conv_attn_kernel/bias"),
        ("attention.self.conv_out_layer.weight", "attention/self/conv_attn_point/kernel"),
        ("attention.self.conv_out_layer.bias", "attention/self/conv_attn_point/bias"),
        ("attention.output.dense.weight", "attention/output/dense/kernel"),
        ("attention.output.LayerNorm.weight", "attention/output/LayerNorm/gamma"),
        ("attention.output.dense.bias", "attention/output/dense/bias"),
        ("attention.output.LayerNorm.bias", "attention/output/LayerNorm/beta"),
        ("intermediate.dense.weight", f"intermediate/{group_dense_name}/kernel"),
        ("intermediate.dense.bias", f"intermediate/{group_dense_name}/bias"),
        ("output.dense.weight", f"output/{group_dense_name}/kernel"),
        ("output.dense.bias", f"output/{group_dense_name}/bias"),
        ("output.LayerNorm.weight", "output/LayerNorm/gamma"),
        ("output.LayerNorm.bias", "output/LayerNorm/beta"),
    ]
    for j in range(config.n_lays):
        for pt_suffix, tf_suffix in per_layer:
            param_mapping[f"encoder.layer.{j}.{pt_suffix}"] = f"electra/encoder/layer_{j}/{tf_suffix}"

    # Copy each checkpoint tensor into the matching PyTorch parameter,
    # reshaping where TF and PyTorch layouts differ.
    for param_name, _ in model.named_parameters():
        result = attrgetter(param_name)(model)
        tf_name = param_mapping[param_name]
        value = torch.from_numpy(tf_data[tf_name])
        logger.info(f"TF: {tf_name}, PT: {param_name} ")
        # TF dense kernels are transposed relative to torch.nn.Linear weights,
        # except the grouped ("g_dense") kernels, which already match.
        if tf_name.endswith("/kernel") and not tf_name.endswith(
            ("/intermediate/g_dense/kernel", "/output/g_dense/kernel")
        ):
            value = value.T
        if tf_name.endswith("/depthwise_kernel"):
            value = value.permute(1, 2, 0)  # 2, 0, 1
        if tf_name.endswith("/pointwise_kernel"):
            value = value.permute(2, 1, 0)  # 2, 1, 0
        if tf_name.endswith("/conv_attn_key/bias"):
            value = value.unsqueeze(-1)
        result.data = value
    return model


def convert_orig_tf1_checkpoint_to_pytorch(tf_checkpoint_path, convbert_config_file, pytorch_dump_path):
    """Convert a TF1 checkpoint and save both PyTorch and TF2 models.

    Args:
        tf_checkpoint_path: path to the original TF1 checkpoint.
        convbert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: output directory for both saved models.
    """
    conf = ConvBertConfig.from_json_file(convbert_config_file)
    model = ConvBertModel(conf)
    model = load_tf_weights_in_convbert(model, conf, tf_checkpoint_path)
    model.save_pretrained(pytorch_dump_path)
    # Round-trip through the PyTorch dump to also emit a TF2 model.
    tf_model = TFConvBertModel.from_pretrained(pytorch_dump_path, from_pt=True)
    tf_model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the TensorFlow checkpoint path.",
    )
    parser.add_argument(
        "--convbert_config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained ConvBERT model. \n"
        "This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_orig_tf1_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.convbert_config_file, args.pytorch_dump_path
    )
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,385
quantapix/qnarre
refs/heads/main
/qnarre/prep/tokens/fast/realm.py
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Fast (rust-backed) tokenizer for the REALM family of checkpoints."""

import json

from tokenizers import normalizers

from ....tokens.base import BatchEncoding
from ....tokens.fast import PreTrainedTokenizerFast
from ....tokens.utils import PaddingStrategy
from ..realm import Tokenizer as Realm

VOCAB_FS = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Every released REALM checkpoint shares the same vocab/tokenizer layout on the
# hub, so the per-checkpoint tables below are derived from this single list.
_CKPTS = (
    "google/realm-cc-news-pretrained-embedder",
    "google/realm-cc-news-pretrained-encoder",
    "google/realm-cc-news-pretrained-scorer",
    "google/realm-cc-news-pretrained-openqa",
    "google/realm-orqa-nq-openqa",
    "google/realm-orqa-nq-reader",
    "google/realm-orqa-wq-openqa",
    "google/realm-orqa-wq-reader",
)

# Built programmatically to stay consistent; this also fixes the previously
# broken entries that used ".../aresolve/..." paths and a "tokenizer.jsont"
# filename (dead URLs on the hub).
VOCAB_MAP = {
    "vocab_file": {c: f"https://huggingface.co/{c}/resolve/main/vocab.txt" for c in _CKPTS},
    "tokenizer_file": {c: f"https://huggingface.co/{c}/resolve/main/tokenizer.json" for c in _CKPTS},
}

# Maximum input length (in tokens) per checkpoint.
INPUT_CAPS = {c: 512 for c in _CKPTS}

PRETRAINED_INIT_CONFIGURATION = {c: {"do_lower_case": True} for c in _CKPTS}


class Tokenizer(PreTrainedTokenizerFast):
    """BERT-style WordPiece fast tokenizer configured for REALM checkpoints.

    Mirrors the slow ``Realm`` tokenizer; adds ``batch_encode_candidates`` for
    encoding a batch of retrieval candidates with uniform MAX_LENGTH padding.
    """

    vocab_fs = VOCAB_FS
    vocab_map = VOCAB_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    input_caps = INPUT_CAPS
    slow_tokenizer_class = Realm

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk="[UNK]",
        sep="[SEP]",
        pad="[PAD]",
        cls="[CLS]",
        msk="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kw,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk=unk,
            sep=sep,
            pad=pad,
            cls=cls,
            msk=msk,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kw,
        )
        # A pre-serialized tokenizer.json may carry normalizer settings that
        # disagree with the arguments given here; detect the mismatch and
        # rebuild the backend normalizer so the arguments win.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars)
            != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kw):
        """Encode a batch of candidate sequences, one call per batch element.

        ``text`` is an iterable of candidate texts (optionally paired via the
        ``text_pair`` keyword); every candidate is padded to MAX_LENGTH so the
        per-key lists can be stacked into tensors of uniform shape.  Returns a
        ``BatchEncoding`` built with the caller's ``return_tensors`` choice.
        """
        # Force uniform padding so all candidates align; tensor conversion is
        # deferred until the whole batch has been collected.
        kw["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kw.pop("text_pair", None)
        return_tensors = kw.pop("return_tensors", None)
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for i, candidate_text in enumerate(batch_text):
            candidate_text_pair = batch_text_pair[i] if batch_text_pair is not None else None
            encoded_candidates = super().__call__(
                candidate_text, candidate_text_pair, return_tensors=None, **kw
            )
            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        # Drop keys the backend never produced (e.g. no token_type_ids).
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, toks_0, toks_1=None):
        """Return ``[CLS] toks_0 [SEP]`` or ``[CLS] toks_0 [SEP] toks_1 [SEP]``."""
        y = [self.cls_token_id] + toks_0 + [self.sep_token_id]
        if toks_1:
            y += toks_1 + [self.sep_token_id]
        return y

    def create_token_type_ids_from_sequences(self, toks_0, toks_1=None):
        """Return segment ids: 0 for the first sequence (incl. CLS/SEP), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if toks_1 is None:
            return len(cls + toks_0 + sep) * [0]
        return len(cls + toks_0 + sep) * [0] + len(toks_1 + sep) * [1]

    def save_vocabulary(self, dir, pre=None):
        """Save the backend model's vocabulary files into ``dir`` (optionally prefixed)."""
        return tuple(self._tokenizer.model.save(dir, name=pre))
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,386
quantapix/qnarre
refs/heads/main
/qnarre/models/gpt.py
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# https://openai.com/blog/language-unsupervised/

"""First-generation OpenAI GPT model: backbone plus sequence-classification,
language-modeling, and dual (LM + multiple-choice) heads."""

import torch

from dataclasses import dataclass
from torch import nn
from torch.nn import functional as F
from transformers.utils import logging

from .. import core as qc
from ..core import utils as qu
from ..core import forward as qf
from ..core import output as qo
from ..core import attention as qa
from ..prep.config.openai import PreTrained

log = logging.get_logger(__name__)


class Model(PreTrained):
    """GPT transformer backbone: token + learned position embeddings, dropout,
    and a stack of ``Layer`` blocks; returns hidden states and attentions."""

    def __init__(self, **kw):
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        self.tok_emb = qc.Embed(cfg.s_vocab, cfg.n_embed, **kw)
        self.pos_emb = qc.Embed(cfg.n_pos, cfg.n_embed, **kw)
        # Non-learned position-index buffer, sliced per sequence length in forward().
        self.register_buffer("pos_ids", torch.arange(cfg.n_pos))
        self.drop = qc.Dropout(cfg.drop_embed, **kw)
        self.lays = qc.Stack([Layer(scale=True, **kw) for _ in range(cfg.n_lays)])

    def forward(self, x, head_m=None, mask=None, pos=None, typ=None, x_emb=None, **kw):
        """Run the stack over token ids ``x`` or precomputed embeddings ``x_emb``
        (exactly one of the two must be given)."""
        cfg = self.cfg
        if x is None:
            # Shape prefix (batch, seq) comes from the embeddings when ids are absent.
            s = x_emb.size()[:-1]
        else:
            assert x_emb is None
            s = x.size()
            x = x.view(-1, s[-1])
        if x_emb is None:
            x_emb = self.tok_emb(x)
        if mask is not None:
            mask = self.get_mask(mask, s)
        head_m = self.get_head_m(head_m, cfg.n_lays)
        if pos is None:
            pos = self.pos_ids[None, : s[-1]]
        pos = self.pos_emb(pos)
        if typ is None:
            # No token-type ids: contributes nothing to the embedding sum.
            typ = 0
        else:
            # Token types share the token embedding table.
            typ = self.tok_emb(typ.view(-1, typ.size(-1)))
        y = self.drop(x_emb + pos + typ)
        attns = hiddens = ()
        for i, lay in enumerate(self.lays):
            # Record the hidden state *entering* each layer.
            hiddens += (y,)
            ys = lay(y, mask=mask, head_m=head_m[i])
            y = ys[0]
            attns += (ys[1],)
        y = y.view(*(s + (y.size(-1),)))
        hiddens += (y,)
        return qo.Base(y, attns, hiddens)


class ForSeqClass(PreTrained):
    """GPT backbone with a linear sequence-classification head."""

    def __init__(self, **kw):
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        self.model = Model(**kw)
        self.proj = qc.Linear(cfg.n_embed, cfg.n_labels, bias=False, **kw)

    # Shared forward implementation for sequence-classification heads.
    forward = qf.forward_seq

    def post_proj(self, x):
        """Pick, per batch element, the logits at the last non-padding position."""
        cfg = self.cfg
        # NOTE(review): x_emb is not defined in this scope — if x is None this
        # raises NameError; presumably callers always pass x. TODO confirm.
        b = (x.shape[:2] if x is not None else x_emb.shape[:2])[0]
        if cfg.PAD is None:
            n = -1
        else:
            # NOTE(review): asserting batch==1 in the PAD-*defined* branch looks
            # inverted relative to the usual "no pad token => batch of 1"
            # convention — confirm against qf.forward_seq's expectations.
            assert b == 1
            n = -1 if x is None else torch.ne(x, cfg.PAD).sum(-1) - 1
        return x[torch.arange(b, device=self.device), n]


class LMHead(PreTrained):
    """GPT backbone with a tied-size language-modeling projection."""

    def __init__(self, **kw):
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        self.model = Model(**kw)
        self.proj = qc.Linear(cfg.n_embed, cfg.s_vocab, bias=False, **kw)

    def forward(self, x, labels=None, **kw):
        """Return LM logits and, when ``labels`` is given, the shifted
        next-token cross-entropy loss."""
        ys = self.model(x, **kw)
        y = self.proj(ys[0])
        loss = None
        if labels is not None:
            # Standard causal-LM shift: predict token t+1 from position t.
            sl = y[..., :-1, :].contiguous()
            ls = labels[..., 1:].contiguous()
            loss = nn.CrossEntropyLoss()(sl.view(-1, sl.size(-1)), ls.view(-1))
        ys = (y,) + ys[1:] + (loss,)
        return qo.WithLoss(*ys)


@dataclass
class Output(qc.Output):
    # LM logits over the vocabulary.
    logits: tuple = None
    # Multiple-choice classification logits.
    mc_logits: tuple = None
    attns: tuple = None
    hiddens: tuple = None
    # LM loss (when labels given).
    loss: tuple = None
    # Multiple-choice loss (when mc_labels given).
    mc_loss: tuple = None


class DualHead(PreTrained):
    """GPT backbone with both an LM head and a multiple-choice summary head."""

    def __init__(self, **kw):
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        # The multiple-choice summary produces a single score per choice.
        cfg.n_labels = 1
        self.model = Model(**kw)
        self.sum = qc.SeqSummary(**kw)
        self.proj = qc.Linear(cfg.n_embed, cfg.s_vocab, bias=False, **kw)

    def forward(self, x, mc_x=None, labels=None, mc_labels=None, **kw):
        """Return LM logits, choice logits, and the corresponding losses when
        ``labels`` / ``mc_labels`` are supplied."""
        ys = self.model(x, **kw)
        y = self.proj(ys[0])
        # Summarize each choice's hidden states at the positions given by mc_x.
        mc_y = self.sum(ys[0], mc_x).squeeze(-1)
        loss, mc_loss = None, None
        if mc_labels is not None:
            mc_loss = nn.CrossEntropyLoss()(mc_y.view(-1, mc_y.size(-1)), mc_labels.view(-1))
        if labels is not None:
            # Same shifted next-token objective as LMHead.
            sl = y[..., :-1, :].contiguous()
            ls = labels[..., 1:].contiguous()
            loss = nn.CrossEntropyLoss()(sl.view(-1, sl.size(-1)), ls.view(-1))
        ys = (y, mc_y) + ys[1:] + (loss, mc_loss)
        return Output(*ys)


class Layer(qc.Module):
    """One GPT block: post-norm attention followed by a post-norm MLP."""

    hs = qc.Hypers({"d_model", "eps"})

    # NOTE(review): mutable defaults ps={} / hs=[] follow this codebase's
    # qc.Module convention; they appear to be read-only here.
    def __init__(self, scale=False, ps={}, hs=[], **kw):
        super().__init__(ps, [self.hs] + hs, **kw)
        cfg = self.get_cfg(kw)
        m = cfg.d_model
        self.attn = Attention(scale, **kw)
        self.norm_attn = qc.LayerNorm(m, cfg.eps, **kw)
        # Inner MLP width is the conventional 4x the model dimension.
        self.proj = MLP(4 * m, **kw)
        self.norm = qc.LayerNorm(m, cfg.eps, **kw)

    def forward(self, x, mask, head_m, **kw):
        # Residual + LayerNorm after attention, then after the MLP (post-norm).
        ys = self.attn(x, mask, head_m)
        y = self.norm_attn(x + ys[0])
        y = self.norm(y + self.proj(y))
        y = [y] + ys[1:]
        return y


class MLP(qc.Module):
    """Position-wise feed-forward: Conv1D expand, activation, Conv1D project, dropout."""

    hs = qc.Hypers({"act", "drop"})

    def __init__(self, d_ff, **kw):
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        m = cfg.d_model
        self.conv = qc.Conv1D(d_ff, m, **kw)
        self.proj = qc.Conv1D(m, d_ff, **kw)
        self.act = qu.activation(cfg.act)
        self.drop = qc.Dropout(cfg.drop, **kw)

    def forward(self, x):
        y = self.act(self.conv(x))
        y = self.drop(self.proj(y))
        return y


class Attention(qc.Module):
    """Multi-head causal self-attention with a fused QKV Conv1D projection."""

    hs = qc.Hypers({"d_model", "drop_attn", "drop", "n_heads", "n_pos"})

    def __init__(self, scale=False, ps={}, hs=[], **kw):
        super().__init__(ps, [self.hs] + hs, **kw)
        cfg = self.get_cfg(kw)
        cfg.scale = scale
        n, d = cfg.n_heads, cfg.d_model
        assert d % n == 0
        # Single projection producing Q, K and V concatenated along the last dim.
        self.attn = qc.Conv1D(d * 3, d, **kw)
        self.proj = qc.Conv1D(d, d, **kw)
        self.drop_attn = qc.Dropout(cfg.drop_attn, **kw)
        self.drop = qc.Dropout(cfg.drop, **kw)
        p = cfg.n_pos
        # Lower-triangular causal mask over the maximum position count.
        self.register_buffer("bias", torch.tril(torch.ones(p, p)).view(1, 1, p, p))

    def forward(self, x, mask, head_m, **kw):
        cfg = self.cfg
        q, k, v = self.attn(x).split(cfg.d_model, dim=2)
        q = self.split_heads(q)
        # k=True: presumably splits with the key transposed for the q @ k matmul
        # in scores() — confirm against qa.split_heads.
        k = self.split_heads(k, k=True)
        v = self.split_heads(v)
        ys = self.scores(q, k, v, mask, head_m)
        y = self.join_heads(ys[0])
        y = (self.drop(self.proj(y)),)
        return y + ys[1:]

    split_heads = qa.split_heads
    join_heads = qa.join_heads

    def scores(self, q, k, v, mask, head_m, **kw):
        """Scaled, causally-masked attention weights applied to ``v``;
        returns (context, attention-probabilities)."""
        cfg = self.cfg
        a = torch.matmul(q, k)
        if cfg.scale:
            a = a / (v.size(-1) ** 0.5)
        # Slice the causal mask to the current query/key lengths, then push
        # future positions toward -inf (additive -1e4) before softmax.
        causal = self.bias[:, :, : a.size(-2), : a.size(-1)]
        a = a * causal + -1e4 * (1 - causal)
        if mask is not None:
            a = a + mask
        a = self.drop_attn(F.softmax(a, dim=-1))
        if head_m is not None:
            a = a * head_m
        return torch.matmul(a, v), a
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,387
quantapix/qnarre
refs/heads/main
/qnarre/models/decision_transfo.py
# Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import torch import torch.utils.checkpoint from dataclasses import dataclass from torch import nn from torch.nn import functional as F from transformers.utils import logging from .. import core as qc from ..core import utils as qu from ..core import output as qo from ..core import attention as qa from ..core.embed import Embed from ..core.mlp import Classifier, MLP, Predictor, Pool from ..prep.config.decision_transfo import PreTrained log = logging.get_logger(__name__) from ...pytorch_utils import Conv1D is_amp_available = True from torch.cuda.amp import autocast LIST = [ "edbeeching/decision-transformer-gym-hopper-medium", ] # Copied from transformers.models.gpt2.modeling_gpt2.GPT2Attention with GPT2->DecisionTransformerGPT2 class DecisionTransformerGPT2Attention(qc.Module): def __init__(self, config, is_cross_attention=False, layer_idx=None): super().__init__() max_positions = config.n_pos self.register_buffer( "bias", torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view( 1, 1, max_positions, max_positions ), ) self.register_buffer("masked_bias", torch.tensor(-1e4)) self.embed_dim = config.d_model self.n_heads = config.n_heads self.head_dim = self.embed_dim // self.n_heads self.split_size = self.embed_dim if self.head_dim * self.n_heads != self.embed_dim: raise 
ValueError( f"`embed_dim` must be divisible by n_heads (got `embed_dim`: {self.embed_dim} and `n_heads`: {self.n_heads})." ) self.scale_attn_weights = config.scale_attn_weights self.is_cross_attention = is_cross_attention # Layer-wise attention scaling, reordering, and upcasting self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx self.layer_idx = layer_idx self.reorder_and_upcast_attn = config.reorder_and_upcast_attn if self.is_cross_attention: self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim) self.q_attn = Conv1D(self.embed_dim, self.embed_dim) else: self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim) self.c_proj = Conv1D(self.embed_dim, self.embed_dim) self.attn_dropout = qc.Dropout(config.drop_attn) self.drop_resid = qc.Dropout(config.drop_resid) def _attn(self, query, key, value, attention_mask=None, head_mask=None): attn_weights = torch.matmul(query, key.transpose(-1, -2)) if self.scale_attn_weights: attn_weights = attn_weights / (value.size(-1) ** 0.5) # Layer-wise attention scaling if self.scale_attn_by_inverse_layer_idx: attn_weights = attn_weights / float(self.layer_idx + 1) if not self.is_cross_attention: # if only "normal" attention layer implements causal mask query_length, key_length = query.size(-2), key.size(-2) causal_mask = self.bias[ :, :, key_length - query_length : key_length, :key_length ].bool() attn_weights = torch.where( causal_mask, attn_weights, self.masked_bias.to(attn_weights.dtype) ) if attention_mask is not None: # Apply the attention mask attn_weights = attn_weights + attention_mask attn_weights = F.softmax(attn_weights, dim=-1) # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) # Mask heads if we want to if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) return attn_output, attn_weights def 
_upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None): # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM) bsz, n_heads, q_seq_len, dk = query.size() _, _, k_seq_len, _ = key.size() # Preallocate attn_weights for `baddbmm` attn_weights = torch.empty( bsz * n_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device ) # Compute Scale Factor scale_factor = 1.0 if self.scale_attn_weights: scale_factor /= float(value.size(-1)) ** 0.5 if self.scale_attn_by_inverse_layer_idx: scale_factor /= float(self.layer_idx + 1) # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk)) if is_amp_available: with autocast(enabled=False): q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape( -1, dk, k_seq_len ) attn_weights = torch.baddbmm( attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor ) attn_weights = attn_weights.reshape(bsz, n_heads, q_seq_len, k_seq_len) else: q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape( -1, dk, k_seq_len ) attn_weights = torch.baddbmm( attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor ) attn_weights = attn_weights.reshape(bsz, n_heads, q_seq_len, k_seq_len) if not self.is_cross_attention: # if only "normal" attention layer implements causal mask query_length, key_length = query.size(-2), key.size(-2) causal_mask = self.bias[ :, :, key_length - query_length : key_length, :key_length ].bool() attn_weights = torch.where( causal_mask, attn_weights, self.masked_bias.to(attn_weights.dtype) ) if attention_mask is not None: # Apply the attention mask attn_weights = attn_weights + attention_mask attn_weights = F.softmax(attn_weights, dim=-1) # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op if otherwise if attn_weights.dtype != torch.float32: raise RuntimeError( "Error with upcasting, attn_weights does not have dtype torch.float32" ) attn_weights = attn_weights.type(value.dtype) 
attn_weights = self.attn_dropout(attn_weights) # Mask heads if we want to if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) return attn_output, attn_weights def _split_heads(self, tensor, n_heads, attn_head_size): new_shape = tensor.size()[:-1] + (n_heads, attn_head_size) tensor = tensor.view(new_shape) return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features) def _merge_heads(self, tensor, n_heads, attn_head_size): tensor = tensor.permute(0, 2, 1, 3).contiguous() new_shape = tensor.size()[:-2] + (n_heads * attn_head_size,) return tensor.view(new_shape) def forward( self, hiddens, layer_past=None, attention_mask=None, head_mask=None, enc_hiddens=None, encoder_attention_mask=None, y_cache=False, output_attentions=False, ): if enc_hiddens is not None: if not hasattr(self, "q_attn"): raise ValueError( "If class is used as cross attention, the weights `q_attn` have to be defined. " "Please make sure to instantiate class with `DecisionTransformerGPT2Attention(..., is_cross_attention=True)`." 
) query = self.q_attn(hiddens) key, value = self.c_attn(enc_hiddens).split(self.split_size, dim=2) attention_mask = encoder_attention_mask else: query, key, value = self.c_attn(hiddens).split(self.split_size, dim=2) query = self._split_heads(query, self.n_heads, self.head_dim) key = self._split_heads(key, self.n_heads, self.head_dim) value = self._split_heads(value, self.n_heads, self.head_dim) if layer_past is not None: past_key, past_value = layer_past key = torch.cat((past_key, key), dim=-2) value = torch.cat((past_value, value), dim=-2) if y_cache is True: present = (key, value) else: present = None if self.reorder_and_upcast_attn: attn_output, attn_weights = self._upcast_and_reordered_attn( query, key, value, attention_mask, head_mask ) else: attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) attn_output = self._merge_heads(attn_output, self.n_heads, self.head_dim) attn_output = self.c_proj(attn_output) attn_output = self.drop_resid(attn_output) outputs = (attn_output, present) if output_attentions: outputs += (attn_weights,) return outputs # a, present, (attns) # Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP with GPT2->DecisionTransformerGPT2 class DecisionTransformerGPT2MLP(qc.Module): def __init__(self, d_ff, config): super().__init__() embed_dim = config.d_model self.c_fc = Conv1D(d_ff, embed_dim) self.c_proj = Conv1D(embed_dim, d_ff) self.act = qu.activation(config.act) self.drop = qc.Dropout(config.drop_resid) def forward(self, x): y = self.c_fc(x) y = self.act(y) y = self.c_proj(y) y = self.drop(y) return y # Copied from transformers.models.gpt2.modeling_gpt2.GPT2Block with GPT2->DecisionTransformerGPT2 class DecisionTransformerGPT2Block(qc.Module): def __init__(self, config, layer_idx=None): super().__init__() d_model = config.d_model inner_dim = config.n_inner if config.n_inner is not None else 4 * d_model self.ln_1 = qc.LayerNorm(d_model, eps=config.eps) self.attn = 
DecisionTransformerGPT2Attention(config, layer_idx=layer_idx) self.ln_2 = qc.LayerNorm(d_model, eps=config.eps) if config.add_cross_attention: self.crossattention = DecisionTransformerGPT2Attention( config, is_cross_attention=True, layer_idx=layer_idx ) self.ln_cross_attn = qc.LayerNorm(d_model, eps=config.eps) self.mlp = DecisionTransformerGPT2MLP(inner_dim, config) def forward( self, hiddens, layer_past=None, attention_mask=None, head_mask=None, enc_hiddens=None, encoder_attention_mask=None, y_cache=False, output_attentions=False, ): residual = hiddens hiddens = self.ln_1(hiddens) attn_outputs = self.attn( hiddens, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, y_cache=y_cache, output_attentions=output_attentions, ) attn_output = attn_outputs[0] # output_attn: a, present, (attns) outputs = attn_outputs[1:] # residual connection hiddens = attn_output + residual if enc_hiddens is not None: # add one self-attention block for cross-attention if not hasattr(self, "crossattention"): raise ValueError( f"If `enc_hiddens` are passed, {self} has to be instantiated with " "cross-attention layers by setting `config.add_cross_attention=True`" ) residual = hiddens hiddens = self.ln_cross_attn(hiddens) cross_attn_outputs = self.crossattention( hiddens, attention_mask=attention_mask, head_mask=head_mask, enc_hiddens=enc_hiddens, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) attn_output = cross_attn_outputs[0] # residual connection hiddens = residual + attn_output outputs = ( outputs + cross_attn_outputs[2:] ) # add cross attns if we output attention weights residual = hiddens hiddens = self.ln_2(hiddens) feed_forward_model_states = self.mlp(hiddens) # residual connection hiddens = residual + feed_forward_model_states if y_cache: outputs = (hiddens,) + outputs else: outputs = (hiddens,) + outputs[1:] return outputs # hiddens, present, (attns, crosses) class GPT2Model(PreTrained): def __init__(self, config): 
        # --- tail of a GPT-2-style model __init__ (the `def __init__` header
        # lies before this chunk): token/position embeddings, the transformer
        # block stack, and the final LayerNorm.
        super().__init__(config)
        self.embed_dim = config.d_model
        self.wte = qc.Embed(config.s_vocab, self.embed_dim)  # token embedding
        self.wpe = qc.Embed(config.n_pos, self.embed_dim)  # learned position embedding
        self.drop = qc.Dropout(config.drop_embed)
        self.h = nn.ModuleList(
            [DecisionTransformerGPT2Block(config, layer_idx=i) for i in range(config.n_lays)]
        )
        self.ln_f = qc.LayerNorm(self.embed_dim, eps=config.eps)
        # Model parallel
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False

    # Copied from transformers.models.gpt2.modeling_gpt2.GPT2Model.forward
    def forward(
        self,
        input_ids=None,
        caches=None,  # per-layer key/value cache tuples (HF "past_key_values")
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        enc_hiddens=None,  # encoder states for cross-attention, if configured
        encoder_attention_mask=None,
        y_cache=None,  # whether to collect/return per-layer caches (HF "use_cache")
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Run the GPT-2 decoder stack.

        Exactly one of `input_ids` / `inputs_embeds` must be supplied.
        Returns a `qo.CachesCrosses` when `return_dict` is truthy, otherwise a
        tuple of the non-None values among (hidden states, caches, all hidden
        states, self-attentions, cross-attentions).
        """
        # Resolve unset flags against the config defaults.
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        y_cache = y_cache if y_cache is not None else self.config.y_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])
        if caches is None:
            past_length = 0
            caches = tuple([None] * len(self.h))
        else:
            # cached keys are (..., past_seq, head_dim): past length is dim -2
            past_length = caches[0][0].size(-2)
        if position_ids is None:
            # positions continue after whatever is already cached
            position_ids = torch.arange(
                past_length, input_shape[-1] + past_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
        # GPT2Attention mask.
        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError("batch_size has to be defined and > 0")
            attention_mask = attention_mask.view(batch_size, -1)
            # broadcast to [batch, 1, 1, seq] for per-head addition
            attention_mask = attention_mask[:, None, None, :]
            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            # 1 -> 0.0 (keep), 0 -> -10000.0 (mask out) as additive bias
            attention_mask = (1.0 - attention_mask) * -10000.0
        if self.config.add_cross_attention and enc_hiddens is not None:
            encoder_batch_size, encoder_sequence_length, _ = enc_hiddens.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_attention_mask = None
        head_mask = self.get_head_mask(head_mask, self.config.n_lays)
        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hiddens = inputs_embeds + position_embeds
        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hiddens = hiddens + token_type_embeds
        hiddens = self.drop(hiddens)
        output_shape = input_shape + (hiddens.size(-1),)
        presents = () if y_cache else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, caches)):
            # Model parallel
            if self.model_parallel:
                torch.cuda.set_device(hiddens.device)
                # Ensure layer_past is on same device as hiddens (might not be correct)
                if layer_past is not None:
                    layer_past = tuple(past_state.to(hiddens.device) for past_state in layer_past)
                # Ensure that attention_mask is always on the same device as hiddens
                if attention_mask is not None:
                    attention_mask = attention_mask.to(hiddens.device)
                if isinstance(head_mask, torch.Tensor):
                    head_mask = head_mask.to(hiddens.device)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hiddens,)
            if self.gradient_checkpointing and self.training:
                if y_cache:
                    # NOTE(review): `log` is not defined anywhere in this chunk
                    # (upstream uses a module-level logger) — confirm the import.
                    log.warning(
                        "`y_cache=True` is incompatible with gradient checkpointing. Setting `y_cache=False`..."
                    )
                    y_cache = False

                def create_custom_forward(module):
                    # closure so checkpoint() only sees tensor args; flags are captured
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, y_cache, output_attentions)

                    return custom_forward

                outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hiddens,
                    None,
                    attention_mask,
                    head_mask[i],
                    enc_hiddens,
                    encoder_attention_mask,
                )
            else:
                outputs = block(
                    hiddens,
                    layer_past=layer_past,
                    attention_mask=attention_mask,
                    head_mask=head_mask[i],
                    enc_hiddens=enc_hiddens,
                    encoder_attention_mask=encoder_attention_mask,
                    y_cache=y_cache,
                    output_attentions=output_attentions,
                )
            hiddens = outputs[0]
            if y_cache is True:
                presents = presents + (outputs[1],)
            if output_attentions:
                # block output layout shifts by one when a cache tuple is present
                all_self_attentions = all_self_attentions + (outputs[2 if y_cache else 1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (outputs[3 if y_cache else 2],)
            # Model Parallel: If it's the last layer for that device, put things on the next device
            if self.model_parallel:
                for k, v in self.device_map.items():
                    # NOTE(review): `self.last_device` is never assigned in this
                    # chunk — presumably set by a parallelize() helper; confirm.
                    if i == v[-1] and "cuda:" + str(k) != self.last_device:
                        hiddens = hiddens.to("cuda:" + str(k + 1))
        hiddens = self.ln_f(hiddens)
        hiddens = hiddens.view(output_shape)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hiddens,)
        if not return_dict:
            return tuple(
                v
                for v in [
                    hiddens,
                    presents,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return qo.CachesCrosses(
            y=hiddens,
            caches=presents,
            hiddens=all_hidden_states,
            attns=all_self_attentions,
            crosses=all_cross_attentions,
        )


@dataclass
class Output(qo.Output):
    # Container for Model.forward results; fields filled per-call.
    state_preds = None
    action_preds = None
    return_preds = None
    hiddens = None
    attns = None
    y = None


class Model(PreTrained):
    """Decision-transformer head: embeds (return, state, action, timestep)
    streams, runs them through a GPT-2 encoder, and predicts next
    return/state/action."""

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.d_model = config.d_model
        self.encoder = GPT2Model(config)
        self.embed_timestep = qc.Embed(config.max_ep_len, config.d_model)
        # NOTE(review): `torch.qc.Linear` looks wrong — torch has no `qc`
        # attribute; presumably `qc.Linear` was intended. Confirm before use.
        self.embed_return = torch.qc.Linear(1, config.d_model)
        self.embed_state = torch.qc.Linear(config.state_dim, config.d_model)
        self.embed_action = torch.qc.Linear(config.act_dim, config.d_model)
        self.embed_ln = qc.LayerNorm(config.d_model)
        self.predict_state = torch.qc.Linear(config.d_model, config.state_dim)
        self.predict_action = nn.Sequential(
            *(
                [qc.Linear(config.d_model, config.act_dim)]
                + ([nn.Tanh()] if config.action_tanh else [])
            )
        )
        self.predict_return = torch.qc.Linear(config.d_model, 1)
        self.post_init()

    def forward(
        self,
        states=None,
        actions=None,
        rewards=None,  # NOTE(review): accepted but never used in this body
        returns_to_go=None,
        timesteps=None,
        attention_mask=None,
        output_hidden_states=None,
        output_attentions=None,
        return_dict=None,
    ):
        """Predict (state, action, return) heads from trajectory inputs.

        Returns an `Output` when `return_dict` is truthy, otherwise the tuple
        (state_preds, action_preds, return_preds).
        """
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        batch_size, seq_length = states.shape[0], states.shape[1]
        if attention_mask is None:
            # attention mask for GPT: 1 if can be attended to, 0 if not
            attention_mask = torch.ones((batch_size, seq_length), dtype=torch.long)
        # embed each modality with a different head
        state_embeddings = self.embed_state(states)
        action_embeddings = self.embed_action(actions)
        returns_embeddings = self.embed_return(returns_to_go)
        time_embeddings = self.embed_timestep(timesteps)
        # time embeddings are treated similar to positional embeddings
        state_embeddings = state_embeddings + time_embeddings
        action_embeddings = action_embeddings + time_embeddings
        returns_embeddings = returns_embeddings + time_embeddings
        # this makes the sequence look like (R_1, s_1, a_1, R_2, s_2, a_2, ...)
        # which works nice in an autoregressive sense since states predict actions
        stacked_inputs = (
            torch.stack((returns_embeddings, state_embeddings, action_embeddings), dim=1)
            .permute(0, 2, 1, 3)
            .reshape(batch_size, 3 * seq_length, self.d_model)
        )
        stacked_inputs = self.embed_ln(stacked_inputs)
        # to make the attention mask fit the stacked inputs, have to stack it as well
        stacked_attention_mask = (
            torch.stack((attention_mask, attention_mask, attention_mask), dim=1)
            .permute(0, 2, 1)
            .reshape(batch_size, 3 * seq_length)
        )
        device = stacked_inputs.device
        # we feed in the input embeddings (not word indices as in NLP) to the model
        encoder_outputs = self.encoder(
            inputs_embeds=stacked_inputs,
            attention_mask=stacked_attention_mask,
            # zero positions: temporal info comes from time_embeddings instead
            position_ids=torch.zeros(stacked_attention_mask.shape, device=device, dtype=torch.long),
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        x = encoder_outputs[0]
        # reshape x so that the second dimension corresponds to the original
        # returns (0), states (1), or actions (2); i.e. x[:,1,t] is the token for s_t
        x = x.reshape(batch_size, seq_length, 3, self.d_model).permute(0, 2, 1, 3)
        # get predictions
        return_preds = self.predict_return(x[:, 2])  # predict next return given state and action
        state_preds = self.predict_state(x[:, 2])  # predict next state given state and action
        action_preds = self.predict_action(x[:, 1])  # predict next action given state
        if not return_dict:
            return (state_preds, action_preds, return_preds)
        return Output(
            y=encoder_outputs.y,
            state_preds=state_preds,
            action_preds=action_preds,
            return_preds=return_preds,
            hiddens=encoder_outputs.hiddens,
            attns=encoder_outputs.attns,
        )
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,388
quantapix/qnarre
refs/heads/main
/qnarre/models/fsmt.py
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# FSMT (FairSeq MachineTranslation) encoder/decoder model, qnarre style.

import random
import torch
import deepspeed

from torch import nn
from torch.nn import functional as F
from transformers.utils import logging

from .. import core as qc
from ..core import utils as qu
from ..core import output as qo
from ..core import attention as qa
from ..core.embed import SinEmbed
from ..prep.config.fsmt import PreTrained

logger = logging.get_logger(__name__)


class Model(PreTrained):
    """Seq2seq wrapper pairing an FSMT `Encoder` and `Decoder`."""

    def __init__(self, **kw):
        super().__init__(**kw)
        self.get_cfg(kw)
        self.enc = Encoder(**kw)
        self.dec = Decoder(**kw)

    def forward(self, x, mask=None, x_dec=None, dec_m=None, dec_head_m=None, y_enc=None, **kw):
        cfg = self.cfg
        # NOTE(review): `yo` is never defined in this chunk — this would raise
        # NameError at runtime; presumably a kwargs/output-options object was
        # meant. Confirm against the original file.
        if x_dec is None:
            yo.cache = False
        if not yo.cache:
            # NOTE(review): keyword `causal_m_dtype=` does not match the
            # `causal_mask_dtype=` parameter declared by
            # `_prepare_fsmt_decoder_inputs` below — would raise TypeError.
            x_dec, dec_m, causal_m = _prepare_fsmt_decoder_inputs(
                cfg,
                x,
                x_dec=x_dec,
                dec_m=dec_m,
                causal_m_dtype=self.dec.tok_emb.weight.dtype,
            )
        else:
            # incremental decoding: masks are rebuilt from the cache instead
            dec_m, causal_m = None, None
        assert x_dec is not None
        if y_enc is None:
            y_enc = self.enc(x, **kw, mask=mask)
        y = self.dec(
            x_dec,
            **kw,
            dec_causal_m=causal_m,
            enc_m=mask,
            enc=y_enc[0],
            head_m=dec_head_m,
            mask=dec_m,
        )
        # concatenate decoder outputs with encoder outputs for the Seq2Seq record
        ys = y + y_enc
        return qo.Seq2Seq(*ys)


class ForCondGen(PreTrained):
    """Conditional-generation head: `Model` plus an optional LM loss."""

    def __init__(self, **kw):
        super().__init__(**kw)
        self.get_cfg(kw)
        self.model = Model(**kw)

    def forward(self, x, labels=None, **kw):
        cfg = self.cfg
        ys = self.model(x, **kw)
        loss = None
        if labels is not None:
            # flatten logits to (tokens, vocab) for cross-entropy
            loss = nn.CrossEntropyLoss()(ys[0].view(-1, cfg.s_tgt_vocab), labels.view(-1))
        ys += (loss,)
        return qo.LossSeq2Seq(*ys)


class Encoder(qc.Module):
    hs = qc.Hypers({"d_model", "drop", "n_pos", "n_heads"})

    def __init__(self, ps={}, hs=[], **kw):
        super().__init__(ps, [self.hs] + hs, **kw)
        cfg = self.get_cfg(kw)
        m = cfg.d_model
        # embedding scale: sqrt(d_model) when enabled, else identity
        cfg.scale = m**0.5 if cfg.scale else 1.0
        self.tok_emb = qc.Embed(cfg.s_src_vocab, m, **kw)
        self.pos_emb = SinEmbed(cfg.n_pos + cfg.PAD + 1, m, cfg.PAD)
        self.lays = qc.Stack([EncLayer(**kw) for _ in range(cfg.n_enc_lays)])
        self.drop = qc.Dropout(cfg.drop, **kw)

    def forward(self, x, mask=None, head_m=None, **kw):
        cfg = self.cfg
        if mask is not None:
            mask = invert_mask(mask)
        y = self.tok_emb(x) * cfg.scale
        y = y + self.pos_emb(x)
        # layers operate time-major: (seq, batch, dim)
        y = self.drop(y).transpose(0, 1)
        attns = hiddens = ()
        assert head_m is None or (head_m.size()[0] == (len(self.lays)))
        for i, lay in enumerate(self.lays):
            hiddens += (y.transpose(0, 1),)
            # LayerDrop: randomly skip whole layers during training
            if self.training and (random.uniform(0, 1) < cfg.drop_enc):
                continue
            else:
                h = head_m[i] if head_m is not None else None
                ys = lay(y, mask=mask, head_m=h, **kw)
                y = ys[0]
                attns += (ys[1],)
        y = y.transpose(0, 1)
        hiddens += (y,)
        return qo.Base(y, attns, hiddens)


class Decoder(qc.Module):
    hs = qc.Hypers({"d_model", "drop", "n_pos", "n_heads"})

    def __init__(self, ps={}, hs=[], **kw):
        super().__init__(ps, [self.hs] + hs, **kw)
        cfg = self.get_cfg(kw)
        m = cfg.d_model
        cfg.scale = m**0.5 if cfg.scale else 1.0
        self.tok_emb = qc.Embed(cfg.s_tgt_vocab, m, **kw)
        self.pos_emb = SinEmbed(cfg.n_pos + cfg.PAD + 1, m, cfg.PAD)
        self.lays = qc.Stack([DecLayer(**kw) for _ in range(cfg.n_dec_lays)])
        # NOTE(review): `is_deepspeed_zero3_enabled` is not imported in this
        # chunk — confirm its import in the full file.
        if is_deepspeed_zero3_enabled():
            # under ZeRO-3 the weight is partitioned; gather it to read its shape
            with deepspeed.zero.GatheredParameters(self.tok_emb.weight, modifier_rank=None):
                s = self.tok_emb.weight.shape
        else:
            s = self.tok_emb.weight.shape
        # output projection is weight-tied to the target token embedding
        self.proj = qc.Linear(s[1], s[0], bias=False, **kw)
        self.proj.weight = self.tok_emb.weight
        self.drop = qc.Dropout(cfg.drop, **kw)

    def forward(
        self,
        x,
        enc,
        enc_m,
        dec_m,
        dec_causal_m,
        head_m=None,
        cross_m=None,
        cache=None,
        **kw,
    ):
        cfg = self.cfg
        if enc_m is not None:
            enc_m = invert_mask(enc_m)
        y = self.tok_emb(x) * cfg.scale
        pos = self.pos_emb(x)
        # NOTE(review): `yo` is undefined here as well (see Model.forward).
        if yo.cache:
            # incremental decoding: only the newest position is processed
            x = x[:, -1:]
            pos = pos[:, -1:]
        y += pos
        # time-major layout for the layer stack
        y = self.drop(y).transpose(0, 1)
        attns = caches = crosses = hiddens = ()
        enc = enc.transpose(0, 1)
        for m, _ in zip([head_m, cross_m], ["head_m", "cross_m"]):
            if m is not None:
                assert m.size()[0] == (len(self.lays))
        for i, lay in enumerate(self.lays):
            hiddens += (y.transpose(0, 1),)
            # LayerDrop on the decoder side
            if self.training and (random.uniform(0, 1) < cfg.drop_dec):
                continue
            h = head_m[i] if head_m is not None else None
            c = cross_m[i] if cross_m is not None else None
            kw.update(enc=enc, enc_m=enc_m, dec_m=dec_m, head_m=h, cross_m=c)
            c = cache[i] if cache is not None else None
            ys = lay(y, causal_m=dec_causal_m, cache=c, **kw)
            y = ys[0]
            attns += (ys[1],)
            if enc is not None:
                crosses += (ys[2],)
            caches += (ys[-1],)
        enc = enc.transpose(0, 1)
        y = y.transpose(0, 1)
        hiddens += (y,)
        y = self.proj(y)
        return qo.CachesCrosses(y, attns, caches, crosses, hiddens)


class EncLayer(qc.Module):
    hs = qc.Hypers({"d_model", "n_heads", "n_pos", "eps"}, {"drop_attn": 0.0, "is_dec": False})

    def __init__(self, ps={}, hs=[], **kw):
        super().__init__(ps, [self.hs] + hs, **kw)
        cfg = self.get_cfg(kw)
        m = cfg.d_model
        self.refl = Attention(n_heads=cfg.n_enc_heads, **kw)  # self-attention
        self.norm_refl = qc.LayerNorm(m, **kw)
        self.act = qu.activation(cfg.act_fun)
        self.ff = qc.Linear(m, cfg.d_enc_ffn, **kw)
        self.proj = qc.Linear(cfg.d_enc_ffn, m, **kw)
        self.norm = qc.LayerNorm(m, **kw)
        self.drop = qc.Dropout(cfg.drop, **kw)

    def forward(self, x, mask, **kw):
        # post-norm residual: self-attention then feed-forward
        y, a = self.refl(query=x, key=x, key_m=mask, **kw)
        y = self.norm_refl(x + self.drop(y))
        x = y
        y = self.drop(self.act(self.ff(y)))
        y = self.drop(self.proj(y))
        y = self.norm(x + y)
        return y, a


class DecLayer(qc.Module):
    hs = qc.Hypers({"d_model", "n_heads", "n_pos", "eps"}, {"drop_attn": 0.0, "is_dec": False})

    def __init__(self, ps={}, hs=[], **kw):
        super().__init__(ps, [self.hs] + hs, **kw)
        cfg = self.get_cfg(kw)
        m = cfg.d_model
        self.refl = Attention(n_heads=cfg.n_dec_heads, **kw)  # masked self-attention
        self.norm_refl = qc.LayerNorm(m, **kw)
        self.act = qu.activation(cfg.act_fun)
        self.drop_act = qc.Dropout(cfg.drop_act, **kw)
        self.attn = Attention(n_heads=cfg.n_dec_heads, enc_dec_attn=True, **kw)  # cross-attention
        self.norm_attn = qc.LayerNorm(m, **kw)
        self.ff = qc.Linear(m, cfg.d_dec_ffn, **kw)
        self.proj = qc.Linear(cfg.d_dec_ffn, m, **kw)
        self.norm = qc.LayerNorm(m, **kw)
        self.drop = qc.Dropout(cfg.drop, **kw)

    def forward(
        self, x, enc, enc_m=None, cache=None, causal_m=None, cross_m=None, dec_m=None, **kw
    ):
        if cache is None:
            cache = {}
        # masked self-attention with residual + post-norm
        y, a = self.refl(query=x, key=x, key_m=dec_m, cache=cache, **kw, mask=causal_m)
        y = self.norm_refl(x + self.drop(y))
        x = y
        # self- and cross-attention must write to distinct cache slots
        assert self.attn.cache_key != self.refl.cache_key
        y, kv = self.attn(query=y, key=enc, key_m=enc_m, cache=cache, **kw, head_m=cross_m)
        y = self.norm_attn(x + self.drop(y))
        x = y
        y = self.drop_act(self.act(self.ff(y)))
        y = self.drop(self.proj(y))
        y = self.norm(x + y)
        return y, a, cache, kv


def invert_mask(mask):
    # Turn 1=attend/0=pad into a boolean "is padding" mask.
    assert mask.dim() == 2
    return mask.eq(0)


def triu_onnx(x, diagonal=0):
    # ONNX-exportable torch.triu replacement built from arange comparisons;
    # zeroes every element below the given diagonal.
    l = x.shape[0]
    arange = torch.arange(l, device=x.device)
    mask = arange.expand(l, l)
    arange = arange.unsqueeze(-1)
    if diagonal:
        arange = arange + diagonal
    mask = mask >= arange
    return x.masked_fill(mask == 0, 0)


def _prepare_fsmt_decoder_inputs(
    config,
    input_ids,
    x_dec=None,
    decoder_padding_mask=None,
    causal_mask_dtype=torch.float32,
):
    """Derive decoder inputs, padding mask and causal mask from the source ids.

    When `x_dec` is absent it is produced by shifting `input_ids` right.
    """
    PAD = config.PAD
    if x_dec is None:
        x_dec = qu.shift_right2(input_ids, PAD)
    bsz, tgt_len = x_dec.size()
    if decoder_padding_mask is None:
        decoder_padding_mask = make_padding_mask(x_dec, PAD)
    else:
        decoder_padding_mask = invert_mask(decoder_padding_mask)
    # NOTE(review): `fill_with_neg_inf` is not defined/imported in this chunk —
    # confirm its source in the full file.
    causal_mask = triu_onnx(fill_with_neg_inf(torch.zeros(tgt_len, tgt_len)), 1).to(
        dtype=causal_mask_dtype, device=x_dec.device
    )
    return x_dec, decoder_padding_mask, causal_mask


def make_padding_mask(x, PAD=1):
    # Boolean mask of PAD positions; None when there is no padding at all,
    # which lets callers skip masking entirely.
    y = x.eq(PAD)
    if not y.any():
        y = None
    return y


# NOTE(review): the Attention class that follows is truncated by this chunk
# boundary; only its leading keyword appears below.
class
Attention(qc.Module): hs = qc.Hypers({"d_in", "d_out"}, {"drop": 0.0, "enc_dec_attn": False}) def __init__(self, ps={}, hs=[], **kw): super().__init__(ps, [self.hs] + hs, **kw) cfg = self.get_cfg(kw) m, n = cfg.d_model, cfg.n_heads assert m % n == 0 cfg.d_head = h = m // n cfg.scale = 1 / (h**0.5) self.key = qc.Linear(m, m, **kw) self.value = qc.Linear(m, m, **kw) self.query = qc.Linear(m, m, **kw) self.proj = qc.Linear(m, m, **kw) self.drop = qc.Dropout(cfg.drop, **kw) self.cache_key = "encoder_decoder" if self.enc_dec_attn else "self" split_heads = qa.split_heads def forward(self, x, mask=None, head_m=None, enc=None, enc_m=None, cache=None, **kw): cfg = self.cfg static_kv = self.enc_dec_attn if cache is not None: saved_state = None cache = {} else: saved_state = cache.get(self.cache_key, {}) if "prev_key" in saved_state and static_kv: enc = None q = self.split_heads(self.query(x) * cfg.scale) if static_kv: if enc is None: k = v = None else: k = self.split_heads(self.key(enc)) v = self.split_heads(self.value(enc)) else: k = self.split_heads(self.key(x)) v = self.split_heads(self.value(x)) if saved_state is not None: k, v, enc_m = self._use_saved_state(k, v, saved_state, enc_m, static_kv, b) cache[self.cache_key] = { "prev_key": k.view(b, n, -1, cfg.d_head), "prev_value": v.view(b, n, -1, cfg.d_head), "prev_key_padding_mask": enc_m if not static_kv else None, } n = cfg.n_heads tgt, b, _ = x.size() src = k.size(1) y = torch.bmm(q, k.transpose(1, 2)) assert y.size() == (b * n, tgt, src) if mask is not None: y = y.view(b, n, tgt, src) + mask y = y.view(b * n, tgt, src) if enc_m is not None and enc_m.dim() == 0: enc_m = None assert enc_m is None or enc_m.size()[:2] == (b, src) if enc_m is not None: y = y.view(b, n, tgt, src) reshaped = enc_m.unsqueeze(1).unsqueeze(2) y = y.masked_fill(reshaped, float("-inf")) y = y.view(b * n, tgt, src) y = F.softmax(y, dim=-1) if head_m is not None: assert head_m.size() == (n,) y = head_m.view(1, -1, 1, 1) * y.view(b, n, tgt, src) y = 
y.view(b * n, tgt, src) a = y.view(b, n, tgt, src) y = a.view(b * n, tgt, src) y = self.drop(y) y = torch.bmm(y, v) assert y.size() == (b * n, tgt, cfg.d_head) y = y.transpose(0, 1).contiguous().view(tgt, b, cfg.d_model) y = self.proj(y) return y, a def _use_saved_state(self, k, v, saved_state, key_m, static_kv, bsz): cfg = self.cfg if "prev_key" in saved_state: _prev_key = saved_state["prev_key"] assert _prev_key is not None prev_key = _prev_key.view(bsz * cfg.n_heads, -1, cfg.d_head) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) if "prev_value" in saved_state: _prev_value = saved_state["prev_value"] assert _prev_value is not None prev_value = _prev_value.view(bsz * cfg.n_heads, -1, cfg.d_head) if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) assert k is not None and v is not None prev_key_padding_mask = saved_state.get("prev_key_padding_mask", None) if prev_key_padding_mask is not None: if static_kv: new_key_padding_mask = prev_key_padding_mask else: new_key_padding_mask = torch.cat([prev_key_padding_mask, key_m], dim=1) else: new_key_padding_mask = key_m return k, v, new_key_padding_mask def fill_with_neg_inf(t): return t.float().fill_(float("-inf")).type_as(t)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,389
quantapix/qnarre
refs/heads/main
/qnarre/models/roformer.py
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# RoFormer model (rotary position embeddings); large parts copied from the
# Hugging Face Transformers BERT/Marian implementations, as marked below.

import math
import numpy as np
import torch
import torch.utils.checkpoint

from torch import nn
from torch.nn import functional as F
from transformers.utils import logging

from .. import core as qc
from ..core import utils as qu
from ..core import output as qo
from ..core import forward as qf
from ..core import attention as qa
from ..core.embed import Embed
from ..core.mlp import Classifier, MLP, Predictor, Pool
from ..prep.config.bert import PreTrained

from torch.nn import CrossEntropyLoss
# NOTE(review): three-level relative imports look one level too deep for a
# module at /qnarre/models/ — verify these resolve.
from ...modeling_utils import SequenceSummary
from ...pytorch_utils import (
    apply_chunking_to_forward,
)

log = logging.get_logger(__name__)

# Known pretrained checkpoints for this architecture.
LIST = [
    "junnyu/roformer_chinese_small",
    "junnyu/roformer_chinese_base",
    "junnyu/roformer_chinese_char_small",
    "junnyu/roformer_chinese_char_base",
    "junnyu/roformer_small_discriminator",
    "junnyu/roformer_small_generator",
]


# Copied from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding with Marian->RoFormer
class RoFormerSinusoidalPositionalEmbedding(qc.Embed):
    """Non-learned sinusoidal position embedding: sin components in the first
    half of each vector, cos components in the second half."""

    def __init__(self, num_positions, embedding_dim, padding_idx=None):
        super().__init__(num_positions, embedding_dim)
        self.weight = self._init_weight(self.weight)

    @staticmethod
    def _init_weight(out: nn.Parameter):
        # out[pos, j] built from pos / 10000^(2*(j//2)/dim); sin goes into
        # columns [0:sentinel), cos into [sentinel:).
        n_pos, dim = out.shape
        position_enc = np.array(
            [
                [pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)]
                for pos in range(n_pos)
            ]
        )
        out.requires_grad = False  # set early to avoid an error in pytorch-1.8+
        sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
        out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
        out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
        out.detach_()
        return out

    @torch.no_grad()
    def forward(self, input_ids_shape, past_key_values_length=0):
        """`input_ids_shape` is expected to be [bsz x seqlen]."""
        bsz, seq_len = input_ids_shape[:2]
        positions = torch.arange(
            past_key_values_length,
            past_key_values_length + seq_len,
            dtype=torch.long,
            device=self.weight.device,
        )
        return super().forward(positions)


class RoFormerEmbeddings(qc.Module):
    """Input embeddings: word + token-type, then LayerNorm and dropout.
    (No position embedding here — positions enter via rotary attention.)"""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = qc.Embed(config.s_vocab, config.d_embed, padding_idx=config.PAD)
        self.token_type_embeddings = qc.Embed(config.n_typ, config.d_embed)
        self.norm = qc.LayerNorm(config.d_embed, eps=config.eps)
        self.drop = qc.Dropout(config.drop)

    def forward(self, input_ids=None, token_type_ids=None, inputs_embeds=None):
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        if token_type_ids is None:
            # default: everything belongs to segment 0
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=inputs_embeds.device)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + token_type_embeddings
        embeddings = self.norm(embeddings)
        embeddings = self.drop(embeddings)
        return embeddings


class RoFormerSelfAttention(qc.Module):
    """BERT-style multi-head self/cross attention with rotary position
    embeddings applied to q/k (and optionally v when `rotary_value`)."""

    def __init__(self, config):
        super().__init__()
        if config.d_model % config.n_heads != 0 and not hasattr(config, "d_embed"):
            raise ValueError(
                f"The hidden size ({config.d_model}) is not a multiple of the number of attention "
                f"heads ({config.n_heads})"
            )
        self.n_heads = config.n_heads
        self.attention_head_size = int(config.d_model / config.n_heads)
        self.all_head_size = self.n_heads * self.attention_head_size
        self.query = qc.Linear(config.d_model, self.all_head_size)
        self.key = qc.Linear(config.d_model, self.all_head_size)
        self.value = qc.Linear(config.d_model, self.all_head_size)
        self.drop = qc.Dropout(config.drop_attn)
        self.is_decoder = config.is_decoder
        self.rotary_value = config.rotary_value

    def transpose_for_scores(self, x):
        # (batch, seq, all_head) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.n_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hiddens,
        attention_mask=None,
        sinusoidal_pos=None,
        head_mask=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(hiddens)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        is_cross_attention = enc_hiddens is not None
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, crosses
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(enc_hiddens))
            value_layer = self.transpose_for_scores(self.value(enc_hiddens))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            # incremental decoding: append this step's k/v to the cache
            key_layer = self.transpose_for_scores(self.key(hiddens))
            value_layer = self.transpose_for_scores(self.value(hiddens))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hiddens))
            value_layer = self.transpose_for_scores(self.value(hiddens))
        if sinusoidal_pos is not None:
            # rotary embedding of q/k (and v if configured)
            if self.rotary_value:
                query_layer, key_layer, value_layer = self.apply_rotary_position_embeddings(
                    sinusoidal_pos, query_layer, key_layer, value_layer
                )
            else:
                query_layer, key_layer = self.apply_rotary_position_embeddings(
                    sinusoidal_pos, query_layer, key_layer
                )
        if self.is_decoder:
            past_key_value = (key_layer, value_layer)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = F.softmax(attention_scores, dim=-1)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.drop(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs

    @staticmethod
    def apply_rotary_position_embeddings(sinusoidal_pos, query_layer, key_layer, value_layer=None):
        # sinusoidal_pos holds [sin | cos] halves; interleave each to full width,
        # then rotate (x_even, x_odd) -> (-x_odd, x_even) and combine.
        sin, cos = sinusoidal_pos.chunk(2, dim=-1)
        sin_pos = torch.stack([sin, sin], dim=-1).reshape_as(sinusoidal_pos)
        cos_pos = torch.stack([cos, cos], dim=-1).reshape_as(sinusoidal_pos)
        rotate_half_query_layer = torch.stack(
            [-query_layer[..., 1::2], query_layer[..., ::2]], dim=-1
        ).reshape_as(query_layer)
        query_layer = query_layer * cos_pos + rotate_half_query_layer * sin_pos
        rotate_half_key_layer = torch.stack(
            [-key_layer[..., 1::2], key_layer[..., ::2]], dim=-1
        ).reshape_as(key_layer)
        key_layer = key_layer * cos_pos + rotate_half_key_layer * sin_pos
        if value_layer is not None:
            rotate_half_value_layer = torch.stack(
                [-value_layer[..., 1::2], value_layer[..., ::2]], dim=-1
            ).reshape_as(value_layer)
            value_layer = value_layer * cos_pos + rotate_half_value_layer * sin_pos
            return query_layer, key_layer, value_layer
        return query_layer, key_layer


# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->RoFormer
class RoFormerSelfOutput(qc.Module):
    """Post-attention projection with dropout and residual LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = qc.Linear(config.d_model, config.d_model)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)

    def forward(self, hiddens, input_tensor):
        hiddens = self.dense(hiddens)
        hiddens = self.drop(hiddens)
        hiddens = self.norm(hiddens + input_tensor)
        return hiddens


class Attention(qc.Module):
    """Self-attention + output projection pair, BERT layout."""

    def __init__(self, config):
        super().__init__()
        self.self = RoFormerSelfAttention(config)
        self.output = RoFormerSelfOutput(config)

    def forward(
        self,
        hiddens,
        attention_mask=None,
        sinusoidal_pos=None,
        head_mask=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        self_outputs = self.self(
            hiddens,
            attention_mask,
            sinusoidal_pos,
            head_mask,
            enc_hiddens,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hiddens)
        outputs = (attention_output,) + self_outputs[1:]  # add attns if we output them
        return outputs


# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->RoFormer
class RoFormerIntermediate(qc.Module):
    """FFN expansion: d_model -> d_ff with activation."""

    def __init__(self, cfg):
        super().__init__()
        self.dense = qc.Linear(cfg.d_model, cfg.d_ff)
        self.act = qu.activation(cfg.act)

    def forward(self, x):
        y = self.dense(x)
        y = self.act(y)
        return y


# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->RoFormer
class RoFormerOutput(qc.Module):
    """FFN contraction: d_ff -> d_model, dropout, residual LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = qc.Linear(config.d_ff, config.d_model)
        self.norm = qc.LayerNorm(config.d_model, eps=config.eps)
        self.drop = qc.Dropout(config.drop)

    def forward(self, hiddens, input_tensor):
        hiddens = self.dense(hiddens)
        hiddens = self.drop(hiddens)
        hiddens = self.norm(hiddens + input_tensor)
        return hiddens


# NOTE(review): `Layer` continues past this view; its definition is cut off
# mid-statement below and is left as found.
class Layer(qc.Module):
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward =
config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = Attention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError( f"{self} should be used as a decoder model if cross attention is added" ) self.crossattention = Attention(config) self.intermediate = RoFormerIntermediate(config) self.output = RoFormerOutput(config) def forward( self, hiddens, attention_mask=None, sinusoidal_pos=None, head_mask=None, enc_hiddens=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hiddens, attention_mask, sinusoidal_pos, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attns if we output attention weights cross_attn_present_key_value = None if self.is_decoder and enc_hiddens is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `enc_hiddens` are passed, {self} has to be instantiated with cross-attention " "layers by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, sinusoidal_pos, head_mask, enc_hiddens, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = ( outputs + 
cross_attention_outputs[1:-1] ) # add cross attns if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output, ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class Encoder(qc.Module): def __init__(self, config): super().__init__() self.config = config self.embed_positions = RoFormerSinusoidalPositionalEmbedding( config.n_pos, config.d_model // config.n_heads ) self.layer = nn.ModuleList([Layer(config) for _ in range(config.n_lays)]) self.gradient_checkpointing = False def forward( self, hiddens, attention_mask=None, head_mask=None, enc_hiddens=None, encoder_attention_mask=None, caches=None, y_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None sinusoidal_pos = self.embed_positions(hiddens.shape[:-1])[None, None, :, :] next_decoder_cache = () if y_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hiddens,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = caches[i] if caches is not None else None if self.gradient_checkpointing and self.training: if y_cache: log.warning( "`y_cache=True` is incompatible with gradient 
checkpointing. Setting `y_cache=False`..." ) y_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hiddens, attention_mask, sinusoidal_pos, layer_head_mask, enc_hiddens, encoder_attention_mask, ) else: layer_outputs = layer_module( hiddens, attention_mask, sinusoidal_pos, layer_head_mask, enc_hiddens, encoder_attention_mask, past_key_value, output_attentions, ) hiddens = layer_outputs[0] if y_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hiddens,) if not return_dict: return tuple( v for v in [ hiddens, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return qo.CachesCrosses( y=hiddens, caches=next_decoder_cache, hiddens=all_hidden_states, attns=all_self_attentions, crosses=all_cross_attentions, ) class Model(PreTrained): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = RoFormerEmbeddings(config) if config.d_embed != config.d_model: self.embeddings_project = qc.Linear(config.d_embed, config.d_model) self.encoder = Encoder(config) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, head_mask=None, inputs_embeds=None, enc_hiddens=None, encoder_attention_mask=None, caches=None, y_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = 
return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: y_cache = y_cache if y_cache is not None else self.config.y_cache else: y_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = caches[0][0].shape[2] if caches is not None else 0 if attention_mask is None: attention_mask = torch.ones( ((batch_size, seq_length + past_key_values_length)), device=device ) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_shape, device ) if self.config.is_decoder and enc_hiddens is not None: encoder_batch_size, encoder_sequence_length, _ = enc_hiddens.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.n_lays) embedding_output = self.embeddings( input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) if hasattr(self, "embeddings_project"): embedding_output = self.embeddings_project(embedding_output) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, enc_hiddens=enc_hiddens, encoder_attention_mask=encoder_extended_attention_mask, 
            caches=caches,
            y_cache=y_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]
        return qo.CachesCrosses(
            y=sequence_output,
            caches=encoder_outputs.caches,
            hiddens=encoder_outputs.hiddens,
            attns=encoder_outputs.attns,
            crosses=encoder_outputs.crosses,
        )


class ForMasked(PreTrained):
    # Masked-LM head on top of the bare model; forward supplied by qf.
    def __init__(self, **kw):
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        self.model = Model(**kw)
        self.proj = Predictor(cfg.d_embed, **kw)

    forward = qf.forward_masked


class ForCausal(PreTrained):
    # Causal-LM head on top of the bare model.
    def __init__(self, **kw):
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        self.model = Model(**kw)
        self.proj = Predictor(cfg.d_embed, **kw)

    forward = qf.forward_causal

    # NOTE(review): this explicit `def forward` overrides the
    # `forward = qf.forward_causal` assignment above — confirm which is intended.
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        inputs_embeds=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        head_mask=None,
        cross_attn_head_mask=None,
        caches=None,
        labels=None,
        y_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # NOTE(review): __init__ sets self.model / self.proj, but this method
        # references self.roformer / self.cls — looks stale after a rename; verify.
        outputs = self.roformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            enc_hiddens=enc_hiddens,
            encoder_attention_mask=encoder_attention_mask,
            caches=caches,
            y_cache=y_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(
                shifted_prediction_scores.view(-1, self.config.s_vocab), labels.view(-1)
            )
        if not return_dict:
            output = (prediction_scores,) + outputs[1:]
            return ((lm_loss,) + output) if lm_loss is not None else output
        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            caches=outputs.caches,
            hiddens=outputs.hiddens,
            attns=outputs.attns,
            crosses=outputs.crosses,
        )


class ForChoice(PreTrained):
    """Multiple-choice head: pooled summary per choice -> one logit per choice."""

    def __init__(self, config):
        super().__init__(config)
        self.roformer = Model(config)
        self.sequence_summary = SequenceSummary(config)
        self.classifier = qc.Linear(config.d_model, 1)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Inputs arrive as (batch, num_choices, seq); flatten choices into the batch.
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = (
            attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        )
        token_type_ids = (
            token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        )
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        outputs = self.roformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        pooled_output = self.sequence_summary(sequence_output)
        logits = self.classifier(pooled_output)
        # Restore the choices dim: (batch * num_choices, 1) -> (batch, num_choices).
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return qo.WithLoss(
            loss=loss,
            logits=reshaped_logits,
            hiddens=outputs.hiddens,
            attns=outputs.attns,
        )


class ForSeqClass(PreTrained):
    # Sequence-classification head; forward supplied by qf.
    def __init__(self, **kw):
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        self.model = Model(**kw)
        self.proj = Classifier(cfg.d_model, **kw)

    forward = qf.forward_seq


class ForTokClass(PreTrained):
    # Token-classification head; forward supplied by qf.
    def __init__(self, **kw):
        super().__init__(**kw)
        self.get_cfg(kw)
        self.model = Model(**kw)
        self.proj = Classifier(**kw)

    forward = qf.forward_tok


class ForQA(PreTrained):
    # Extractive QA head: two labels (span start / span end).
    def __init__(self, **kw):
        kw.update(n_labels=2)
        super().__init__(**kw)
        cfg = self.get_cfg(kw)
        self.model = Model(add_pool=False, **kw)
        # NOTE(review): **kw forwarded into qc.Linear — confirm qc.Linear accepts it.
        self.proj = qc.Linear(cfg.d_model, cfg.n_labels, **kw)

    forward = qf.forward_qa
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,390
quantapix/qnarre
refs/heads/main
/qnarre/prep/tokens/xlnet.py
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

import os
import unicodedata

from shutil import copyfile

import sentencepiece as spm

from ...file_utils import SPIECE_UNDERLINE
from ...tokens.utils import AddedToken, PreTrainedTokenizer

VOCAB_FS = {"vocab_file": "spiece.model"}

VOCAB_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

INPUT_CAPS = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class Tokenizer(PreTrainedTokenizer):
    """SentencePiece-backed XLNet tokenizer.

    Wraps a `sentencepiece.SentencePieceProcessor` loaded from `vocab_file`
    and maps tokens to/from ids through it. Special-token layout follows
    XLNet: sequences end with `<sep>` and a trailing `<cls>`, and padding
    is applied on the left (`padding_side = "left"`).
    """

    vocab_fs = VOCAB_FS
    vocab_map = VOCAB_MAP
    input_caps = INPUT_CAPS
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos="<s>",
        eos="</s>",
        unk="<unk>",
        sep="<sep>",
        pad="<pad>",
        cls="<cls>",
        msk="<mask>",
        # NOTE: `None` sentinel instead of a mutable list default; the
        # original `["<eop>", "<eod>"]` default was a shared mutable object.
        additional_special_tokens=None,
        sp_model_kw=None,
        **kw,
    ):
        """Load the SentencePiece model and register special tokens.

        Args:
            vocab_file: path to the `spiece.model` SentencePiece file.
            do_lower_case: lowercase text in `preprocess_text`.
            remove_space: collapse runs of whitespace in `preprocess_text`.
            keep_accents: if False, strip combining marks (NFKD) in
                `preprocess_text`.
            bos/eos/unk/sep/pad/cls/msk: special-token strings.
            additional_special_tokens: extra special tokens; defaults to
                `["<eop>", "<eod>"]` when not given.
            sp_model_kw: kwargs forwarded to `SentencePieceProcessor`.
        """
        # Fresh list per instance — avoids the mutable-default-argument trap.
        if additional_special_tokens is None:
            additional_special_tokens = ["<eop>", "<eod>"]
        # The mask token absorbs the preceding space (lstrip=True).
        msk = AddedToken(msk, lstrip=True, rstrip=False) if isinstance(msk, str) else msk
        self.sp_model_kw = {} if sp_model_kw is None else sp_model_kw
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos=bos,
            eos=eos,
            unk=unk,
            sep=sep,
            pad=pad,
            cls=cls,
            msk=msk,
            additional_special_tokens=additional_special_tokens,
            sp_model_kw=self.sp_model_kw,
            **kw,
        )
        # Padding positions get token-type id 3 (see SEG_ID_SEP above;
        # presumably chosen to match the original XLNet segment scheme).
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kw)
        self.sp_model.Load(vocab_file)

    @property
    def s_vocab(self):
        # Vocabulary size as reported by the SentencePiece model.
        return len(self.sp_model)

    def get_vocab(self):
        """Return token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.s_vocab)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and rebuild
        # from `vocab_file` in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # Backwards compatibility with pickles created before sp_model_kw.
        if not hasattr(self, "sp_model_kw"):
            self.sp_model_kw = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kw)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize raw text prior to SentencePiece encoding."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        # Normalize LaTeX-style quotes to plain double quotes.
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            # Decompose, then drop combining marks to strip accents.
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text):
        """Tokenize with SentencePiece, splitting trailing commas off digits.

        A piece like `"9,"` is re-encoded as the digits plus a separate
        `","` piece so numbers and punctuation do not fuse.
        """
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    # Drop the spurious leading underline introduced by
                    # re-encoding a mid-word fragment.
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join pieces and convert SentencePiece underlines back to spaces."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, toks_0, toks_1=None):
        """XLNet layout: `A <sep> <cls>` or `A <sep> B <sep> <cls>`."""
        sep = [self.SEP]
        cls = [self.cls_token_id]
        if toks_1 is None:
            return toks_0 + sep + cls
        return toks_0 + sep + toks_1 + sep + cls

    def get_special_tokens_mask(
        self,
        toks_0,
        toks_1=None,
        has_specials=False,
    ):
        """Return 1 for special-token positions, 0 for sequence tokens."""
        if has_specials:
            return super().get_special_tokens_mask(toks_0=toks_0, toks_1=toks_1, has_specials=True)
        if toks_1 is not None:
            return ([0] * len(toks_0)) + [1] + ([0] * len(toks_1)) + [1, 1]
        return ([0] * len(toks_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, toks_0, toks_1=None):
        """Segment ids: 0 for `A <sep>`, 1 for `B <sep>`, 2 for `<cls>`."""
        sep = [self.SEP]
        cls_segment_id = [2]
        if toks_1 is None:
            return len(toks_0 + sep) * [0] + cls_segment_id
        return len(toks_0 + sep) * [0] + len(toks_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, dir, pre=None):
        """Copy (or serialize) the SentencePiece model into `dir`.

        Returns a 1-tuple with the written path, matching the
        save_vocabulary convention.
        """
        path = os.path.join(
            dir,
            (pre + "-" if pre else "") + VOCAB_FS["vocab_file"],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(path) and os.path.isfile(
            self.vocab_file
        ):
            copyfile(self.vocab_file, path)
        elif not os.path.isfile(self.vocab_file):
            # Source file gone (e.g. loaded from a serialized proto):
            # write the in-memory model instead.
            with open(path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (path,)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,391
quantapix/qnarre
refs/heads/main
/qnarre/base/doc.py
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

import datetime as dt

from ..rectify import rectifier

from .author import Author
from .named import Named, Saved


class Genre(Named):
    """A document genre; a plain Named subclass used as a registry key."""
    pass


class Doc(Saved, Named):
    """A saved, named text document with a genre, author, title and pages.

    Pages are stored as a nested list: groups -> records -> lines, as
    produced by `from_text` and consumed by `to_text`.
    """

    # File suffix used by the Saved machinery (presumably — Saved is
    # defined elsewhere; confirm against .named).
    suff = '.txt'
    # Class-level default; instances assign a list in from_text/__init__.
    pages = None

    def __init__(self, genre=None, author=None, title=None, pages=None, **kw):
        # Attributes are only set when truthy values are supplied; other
        # instances rely on values restored elsewhere (e.g. by Saved).
        super().__init__(**kw)
        if genre:
            self.genre = Genre.create(name=genre)
        if author:
            self.author = author
        if title:
            self.title = title
        if pages:
            self.pages = pages

    @property
    def factor(self):
        # Combine the genre's factor multiplicatively with the base factor.
        return self.genre.factor * super().factor

    @property
    def bias(self):
        # Combine the genre's bias additively with the base bias.
        return self.genre.bias + super().bias

    @property
    def date(self):
        # Parse a date from the third '/'-separated segment of the name,
        # keeping only its first three '-'-separated parts.
        # Assumes names look like 'x/y/YY-MM-DD-.../...' — TODO confirm.
        s = self.name.split('/')[2]
        s = '-'.join(s.split('-')[:3])
        return dt.datetime.strptime(s, '%y-%m-%d').date()

    @property
    def props(self):
        """Plain-dict snapshot of the identifying properties."""
        return {
            'name': self.name,
            'genre': self.genre.name,
            'author': self.author,
            'title': self.title,
        }

    @property
    def fields(self):
        """Export fields merged with the author's fields for metadata output."""
        s = '{}.pdf'.format(self.name)
        fs = {'Date': self.date, 'Title': self.title, 'Source': s}
        fs.update(Author.create(name=self.author).fields)
        fs.update({'Type': self.tag, 'Genre': self.genre.name})
        return fs

    def from_text(self, txt, **_):
        """Parse rectified text into title and nested pages.

        Line 0 becomes the title; line 1 is skipped (expected blank).
        Groups are separated by blank pairs ('\\n\\n\\n'), records within a
        group by single blanks ('\\n\\n'), and records are lists of lines.
        """
        txt = tuple(rectifier(txt))
        self.title = txt[0]
        txt = '\n'.join(txt[2:])
        self.pages = gs = []
        for g in txt.split('\n\n\n'):
            rs = []
            for r in g.split('\n\n'):
                rs.append(r.splitlines())
            gs.append(rs)

    def to_text(self, **_):
        """Serialize title and pages back to text (inverse of from_text)."""
        txt = [self.title, '']
        for rs in self.pages:
            for ls in rs:
                txt.extend(ls)
                txt.append('')
            # Extra blank line closes the group (yields '\n\n\n' separators).
            txt.append('')
        return '\n'.join(txt).strip()
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,392
quantapix/qnarre
refs/heads/main
/qnarre/run/ddp.py
"""Minimal multi-GPU training demo for torch.distributed.

Launched via ``torchrun`` (see the command lines at the bottom of the file).
Demonstrates three setups selected at runtime in ``run``:
single-device DDP, tensor-parallel via DeviceMesh (``--mesh``), and
two-device model parallelism wrapped in DDP.

NOTE(review): ``torch.distributed._tensor`` and ``PairwiseParallel`` are
private / version-sensitive APIs — confirm they exist in the pinned torch
version before relying on this script.
"""
import argparse
import os
import tempfile
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed._tensor import DeviceMesh
from torch.distributed.tensor.parallel import PairwiseParallel, parallelize_module


class Model(nn.Module):
    """Tiny single-device MLP: 10 -> 32 -> ReLU -> 5."""

    def __init__(self):
        super(Model, self).__init__()
        self.net1 = nn.Linear(10, 32)
        self.relu = nn.ReLU()
        self.net2 = nn.Linear(32, 5)

    def forward(self, x):
        # Plain feed-forward; no device movement here — callers place the model.
        return self.net2(self.relu(self.net1(x)))


class Model2(nn.Module):
    """Same MLP split across two devices (manual model parallelism).

    ``net1`` lives on device ``d0``, ``net2`` on ``d1``; ``forward`` moves the
    activation between them explicitly.
    """

    def __init__(self, d0, d1):
        super(Model2, self).__init__()
        self.d0 = d0
        self.d1 = d1
        self.net1 = torch.nn.Linear(10, 32).to(d0)
        self.relu = torch.nn.ReLU()
        self.net2 = torch.nn.Linear(32, 5).to(d1)

    def forward(self, x):
        # Stage 1 on d0, then hand the activation to d1 for stage 2.
        x = x.to(self.d0)
        x = self.relu(self.net1(x))
        x = x.to(self.d1)
        return self.net2(x)


def run(args):
    """Train for ``args.iter_nums`` steps under the configured parallelism.

    Requires torchrun-provided env vars LOCAL_RANK / LOCAL_WORLD_SIZE and an
    already-initialized process group (done by ``main``).
    """
    r = int(os.environ["LOCAL_RANK"])
    # GPUs available to *this* local rank (total GPUs split evenly across
    # local ranks); ``ids`` is this rank's contiguous slice of device indices.
    n = torch.cuda.device_count() // int(os.environ["LOCAL_WORLD_SIZE"])
    ids = list(range(r * n, (r + 1) * n))
    print(
        f"[{os.getpid()}] run with: world_size = {dist.get_world_size()}, "
        + f"rank = {dist.get_rank()}, n = {n}, device_ids = {ids} \n",
        end="",
    )
    id = ids[0]  # primary device for this rank (shadows builtin ``id``)
    if len(ids) == 1:
        # One GPU per rank: classic DDP data parallelism.
        m = DDP(Model().cuda(id), device_ids=ids, output_device=id)
        labels = torch.randn(20, 5).to(id)
    else:
        if args.mesh:
            # Tensor parallelism across this rank's GPUs via DeviceMesh.
            mesh = DeviceMesh("cuda", ids)
            m = parallelize_module(Model(), mesh, PairwiseParallel())
            labels = torch.randn(20, 5).to(id)
        else:
            # Two-device pipeline (Model2) wrapped in DDP; output lands on
            # ids[1], so labels must live there too.
            m = DDP(Model2(id, ids[1]))
            labels = torch.randn(20, 5).to(ids[1])
    loss = nn.MSELoss()
    o = optim.SGD(m.parameters(), lr=0.001)
    # Toy training loop on random data — this is a plumbing demo, not a model.
    for _ in range(args.iter_nums):
        o.zero_grad()
        ys = m(torch.randn(20, 10).cuda(id))
        loss(ys, labels).backward()
        o.step()


def run_checkpoint(local_world):
    """Save-then-restore demo: rank 0 writes a checkpoint, all ranks load it.

    ``local_world`` is currently unused — presumably a leftover or reserved
    parameter; confirm with callers before removing. Barriers bracket the
    load so no rank reads before rank 0 has written, and the file is not
    removed before every rank has loaded it.
    """
    r = int(os.environ["LOCAL_RANK"])
    model = Model().to(r)
    ddp = DDP(model, device_ids=[r])
    loss = nn.MSELoss()
    optimizer = optim.SGD(ddp.parameters(), lr=0.001)
    CHECKPOINT_PATH = tempfile.gettempdir() + "/model.checkpoint"
    if r == 0:
        # Only rank 0 writes; DDP keeps replicas in sync so one copy suffices.
        torch.save(ddp.state_dict(), CHECKPOINT_PATH)
    dist.barrier()  # ensure the file exists before any rank loads
    # Remap tensors saved from cuda:0 onto this rank's own device.
    map_location = {"cuda:%d" % 0: "cuda:%d" % r}
    ddp.load_state_dict(torch.load(CHECKPOINT_PATH, map_location=map_location))
    optimizer.zero_grad()
    ys = ddp(torch.randn(20, 10))
    labels = torch.randn(20, 5).to(r)
    loss(ys, labels).backward()
    optimizer.step()
    dist.barrier()  # keep the file alive until every rank has finished loading
    if r == 0:
        os.remove(CHECKPOINT_PATH)


def main(args):
    """Process entry point: init the NCCL process group, run, tear down."""
    # torchrun supplies these rendezvous variables; echo them for debugging.
    env = {k: os.environ[k] for k in ("MASTER_ADDR", "MASTER_PORT", "WORLD_SIZE", "RANK")}
    print(f"[{os.getpid()}] init_process_group with: {env}")
    dist.init_process_group(backend="nccl")
    print(
        f"[{os.getpid()}] main with: world_size = {dist.get_world_size()}, "
        + f"rank = {dist.get_rank()}, backend={dist.get_backend()} \n",
        end="",
    )
    run(args)
    dist.destroy_process_group()


if __name__ == "__main__":
    p = argparse.ArgumentParser()
    p.add_argument("--iter_nums", type=int, default=2)
    p.add_argument("--mesh", action="store_true")
    args = p.parse_args()
    main(args)


# torchrun --standalone --nproc-per-node=gpu ddp.py
# torchrun --rdzv-id=123 --rdzv-backend=c10d --rdzv-endpoint=localhost:29402 --nnodes=1:2 --nproc-per-node=2 ddp.py
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,393
quantapix/qnarre
refs/heads/main
/qnarre/prep/tokens/convbert.py
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""ConvBERT tokenizer: reuses the BERT tokenizer, overriding only the
pretrained-checkpoint metadata tables."""

from .bert import Tokenizer as Bert

# The three published ConvBERT checkpoints; every metadata table below is
# keyed by these names, so they are defined once and reused.
_CKPTS = (
    "YituTech/conv-bert-base",
    "YituTech/conv-bert-medium-small",
    "YituTech/conv-bert-small",
)

# Local filename(s) that make up a vocabulary on disk.
VOCAB_FS = {"vocab_file": "vocab.txt"}

# Remote vocabulary location per checkpoint (same URL pattern for all three).
VOCAB_MAP = {
    "vocab_file": {
        c: f"https://huggingface.co/{c}/resolve/main/vocab.txt" for c in _CKPTS
    }
}

# Maximum input length per checkpoint — 512 for every ConvBERT variant.
INPUT_CAPS = dict.fromkeys(_CKPTS, 512)

# Tokenizer construction defaults per checkpoint.
PRETRAINED_INIT_CONFIGURATION = {c: {"do_lower_case": True} for c in _CKPTS}


class Tokenizer(Bert):
    """Behaves exactly like the BERT tokenizer; only the class-level
    checkpoint metadata differs."""

    vocab_fs = VOCAB_FS
    vocab_map = VOCAB_MAP
    input_caps = INPUT_CAPS
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,394
quantapix/qnarre
refs/heads/main
/qnarre/prep/dataset/xglue.py
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import json
import os

import datasets as ds

# Single archive that bundles every XGLUE task; each task lives in its own
# subfolder (see Config.data_dir below).
_XGLUE_ALL_DATA = "https://xglue.blob.core.windows.net/xglue/xglue_full_dataset.tar.gz"

# Languages for which dev/test splits are generated, per task.
_LANGS = {
    "ner": ["en", "de", "es", "nl"],
    "pos": ["en", "de"],
    "mlqa": ["en", "de"],
    "nc": ["en", "de"],
    "xnli": ["en", "de"],
    "paws-x": ["en", "de"],
    "qadsm": ["en", "de"],
    "wpr": ["en", "de"],
    "qam": ["en", "de"],
    "qg": ["en", "de"],
    "ntg": ["en", "de"],
}

# Relative file paths inside each task folder; "{}" / "{0}" is filled with the
# language code in _split_generators.
_PATHS = {
    "mlqa": {
        "train": os.path.join("squad1.1", "train-v1.1.json"),
        "dev": os.path.join("MLQA_V1", "dev", "dev-context-{0}-question-{0}.json"),
        "test": os.path.join("MLQA_V1", "test", "test-context-{0}-question-{0}.json"),
    },
    "xnli": {"train": "multinli.train.en.tsv", "dev": "{}.dev", "test": "{}.test"},
    "paws-x": {
        "train": os.path.join("en", "train.tsv"),
        "dev": os.path.join("{}", "dev_2k.tsv"),
        "test": os.path.join("{}", "test_2k.tsv"),
    },
}
for x in ["ner", "pos"]:
    _PATHS[x] = {"train": "en.train", "dev": "{}.dev", "test": "{}.test"}
for x in ["nc", "qadsm", "wpr", "qam"]:
    _PATHS[x] = {
        "train": "xglue." + x + ".en.train",
        "dev": "xglue." + x + ".{}.dev",
        "test": "xglue." + x + ".{}.test",
    }
for x in ["qg", "ntg"]:
    _PATHS[x] = {
        "train": "xglue." + x + ".en",
        "dev": "xglue." + x + ".{}",
        "test": "xglue." + x + ".{}",
    }


class Config(ds.BuilderConfig):
    """BuilderConfig for a single XGLUE task.

    data_dir: subfolder of the extracted archive that holds the task's files.
    citation/url: optional task metadata.
    """

    def __init__(
        self,
        data_dir,
        citation=None,
        url=None,
        **kw,
    ):
        # FIX: citation/url used to be required positionals, but every entry
        # in XGlue.BUILDER_CONFIGS below passes only name= and data_dir=,
        # which raised TypeError the moment this module was imported. They
        # now default to None, which is backward-compatible for any caller
        # that did supply them.
        super(Config, self).__init__(version=ds.Version("1.0.0", ""), **kw)
        self.data_dir = data_dir
        self.citation = citation
        self.url = url


class XGlue(ds.GeneratorBasedBuilder):
    """Dataset builder for the XGLUE cross-lingual benchmark tasks."""

    BUILDER_CONFIGS = [
        Config(name="ner", data_dir="NER"),
        Config(name="pos", data_dir="POS"),
        Config(name="mlqa", data_dir="MLQA"),
        Config(name="nc", data_dir="NC"),
        Config(name="xnli", data_dir="XNLI"),
        Config(name="paws-x", data_dir="PAWSX"),
        Config(name="qadsm", data_dir="QADSM"),
        Config(name="wpr", data_dir="WPR"),
        Config(name="qam", data_dir="QAM"),
        Config(name="qg", data_dir="QG"),
        Config(name="ntg", data_dir="NTG"),
    ]

    def _info(self):
        """Return the per-task feature schema wrapped in a DatasetInfo."""
        if self.config.name == "ner":
            features = {
                "words": ds.Sequence(ds.Value("string")),
                "ner": ds.Sequence(
                    ds.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                            "B-MISC",
                            "I-MISC",
                        ]
                    )
                ),
            }
        elif self.config.name == "pos":
            features = {
                "words": ds.Sequence(ds.Value("string")),
                "pos": ds.Sequence(
                    ds.features.ClassLabel(
                        names=[
                            "ADJ",
                            "ADP",
                            "ADV",
                            "AUX",
                            "CCONJ",
                            "DET",
                            "INTJ",
                            "NOUN",
                            "NUM",
                            "PART",
                            "PRON",
                            "PROPN",
                            "PUNCT",
                            "SCONJ",
                            "SYM",
                            "VERB",
                            "X",
                        ]
                    )
                ),
            }
        elif self.config.name == "mlqa":
            features = {
                "context": ds.Value("string"),
                "question": ds.Value("string"),
                "answers": ds.features.Sequence(
                    {"answer_start": ds.Value("int32"), "text": ds.Value("string")}
                ),
            }
        elif self.config.name == "nc":
            features = {
                "news_title": ds.Value("string"),
                "news_body": ds.Value("string"),
                "news_category": ds.ClassLabel(
                    names=[
                        "foodanddrink",
                        "sports",
                        "travel",
                        "finance",
                        "lifestyle",
                        "news",
                        "entertainment",
                        "health",
                        "video",
                        "autos",
                    ]
                ),
            }
        elif self.config.name == "xnli":
            features = {
                "premise": ds.Value("string"),
                "hypothesis": ds.Value("string"),
                "label": ds.features.ClassLabel(names=["entailment", "neutral", "contradiction"]),
            }
        elif self.config.name == "paws-x":
            features = {
                "sentence1": ds.Value("string"),
                "sentence2": ds.Value("string"),
                "label": ds.features.ClassLabel(names=["different", "same"]),
            }
        elif self.config.name == "qadsm":
            features = {
                "query": ds.Value("string"),
                "ad_title": ds.Value("string"),
                "ad_description": ds.Value("string"),
                "relevance_label": ds.features.ClassLabel(names=["Bad", "Good"]),
            }
        elif self.config.name == "wpr":
            # NOTE(review): "relavance_label" is misspelled, but it is a
            # runtime feature key that consumers may already depend on, so it
            # is deliberately left unchanged.
            features = {
                "query": ds.Value("string"),
                "web_page_title": ds.Value("string"),
                "web_page_snippet": ds.Value("string"),
                "relavance_label": ds.features.ClassLabel(
                    names=["Bad", "Fair", "Good", "Excellent", "Perfect"]
                ),
            }
        elif self.config.name == "qam":
            features = {
                "question": ds.Value("string"),
                "answer": ds.Value("string"),
                "label": ds.features.ClassLabel(names=["False", "True"]),
            }
        elif self.config.name == "qg":
            features = {
                "answer_passage": ds.Value("string"),
                "question": ds.Value("string"),
            }
        elif self.config.name == "ntg":
            features = {
                "news_body": ds.Value("string"),
                "news_title": ds.Value("string"),
            }
        return ds.DatasetInfo(
            description="",
            citation="",
            homepage="",
            license="",
            features=ds.Features(features),
        )

    def _split_generators(self, mgr):
        """Download the full archive once, then emit train plus per-language
        validation/test split generators for the active task."""
        all_data_folder = mgr.download_and_extract(_XGLUE_ALL_DATA)
        data_folder = os.path.join(all_data_folder, "xglue_full_dataset", self.config.data_dir)
        name = self.config.name
        languages = _LANGS[name]
        # NOTE(review): upstream ds.SplitGenerator takes `gen_kwargs`; this
        # repo passes `gen_kw` — presumably a project-wide renaming
        # convention. Confirm against the vendored datasets API.
        return (
            [
                ds.SplitGenerator(
                    name=ds.Split.TRAIN,
                    gen_kw={
                        "data_file": os.path.join(data_folder, _PATHS[name]["train"]),
                        "split": "train",
                    },
                ),
            ]
            + [
                ds.SplitGenerator(
                    name=ds.Split(f"validation.{c}"),
                    gen_kw={
                        "data_file": os.path.join(data_folder, _PATHS[name]["dev"].format(c)),
                        "split": "dev",
                    },
                )
                for c in languages
            ]
            + [
                ds.SplitGenerator(
                    name=ds.Split(f"test.{x}"),
                    gen_kw={
                        "data_file": os.path.join(data_folder, _PATHS[name]["test"].format(x)),
                        "split": "test",
                    },
                )
                for x in languages
            ]
        )

    def _generate_examples(self, data_file, split=None):
        """Yield (id, example) pairs from data_file, parsed per task format:
        SQuAD-style JSON (mlqa), blank-line-separated CoNLL columns
        (ner/pos), parallel .src/.tgt files (ntg/qg), or TSV rows."""
        keys = list(self._info().features.keys())
        if self.config.name == "mlqa":
            with open(data_file, encoding="utf-8") as f:
                data = json.load(f)
            for examples in data["data"]:
                for example in examples["paragraphs"]:
                    context = example["context"]
                    for qa in example["qas"]:
                        question = qa["question"]
                        id_ = qa["id"]
                        answers = qa["answers"]
                        answers_start = [answer["answer_start"] for answer in answers]
                        answers_text = [answer["text"] for answer in answers]
                        yield id_, {
                            "context": context,
                            "question": question,
                            "answers": {"answer_start": answers_start, "text": answers_text},
                        }
        elif self.config.name in ["ner", "pos"]:
            words = []
            result = []
            idx = -1
            with open(data_file, encoding="utf-8") as f:
                for line in f:
                    if line.strip() == "":
                        # Blank line terminates a sentence; emit it if non-empty.
                        if len(words) > 0:
                            y_kw = {keys[0]: words, keys[1]: result}
                            words = []
                            result = []
                            idx += 1
                            yield idx, y_kw
                    else:
                        splits = line.strip().split(" ")
                        words.append(splits[0])
                        result.append(splits[1])
            # FIX: previously the last sentence was silently dropped whenever
            # the file did not end with a trailing blank line; flush it here.
            if len(words) > 0:
                idx += 1
                yield idx, {keys[0]: words, keys[1]: result}
        elif self.config.name in ["ntg", "qg"]:
            # data_file is a path prefix; the actual pair of files is
            # <prefix>.src.<split> / <prefix>.tgt.<split>.
            with open(data_file + ".src." + split, encoding="utf-8") as src_f, open(
                data_file + ".tgt." + split, encoding="utf-8"
            ) as tgt_f:
                for idx, (src_line, tgt_line) in enumerate(zip(src_f, tgt_f)):
                    yield idx, {keys[0]: src_line.strip(), keys[1]: tgt_line.strip()}
        else:
            # Map raw TSV label tokens onto the ClassLabel names declared in
            # _info for the tasks whose files use different spellings/indices.
            _process_dict = {
                "paws-x": {"0": "different", "1": "same"},
                "xnli": {"contradictory": "contradiction"},
                "qam": {"0": "False", "1": "True"},
                "wpr": {"0": "Bad", "1": "Fair", "2": "Good", "3": "Excellent", "4": "Perfect"},
            }

            def _process(value):
                if self.config.name in _process_dict and value in _process_dict[self.config.name]:
                    return _process_dict[self.config.name][value]
                return value

            with open(data_file, encoding="utf-8") as f:
                for idx, line in enumerate(f):
                    # .tsv files carry a header row; skip it.
                    if data_file.split(".")[-1] == "tsv" and idx == 0:
                        continue
                    items = line.strip().split("\t")
                    # paws-x rows carry a leading id column that is not a feature.
                    yield idx, {
                        key: _process(value)
                        for key, value in zip(
                            keys, items[1:] if self.config.name == "paws-x" else items
                        )
                    }
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,395
quantapix/qnarre
refs/heads/main
/qnarre/base/base.py
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================


class Keys:
    """Namespace of string constants used throughout the package.

    Values are plain strings; casing follows each constant's literal
    (lowercase field names, title-cased honorifics and institutions).
    """

    # Generic record fields.
    AGENCY = 'agency'
    KIND = 'kind'
    NAME = 'name'
    TITLE = 'title'

    # Honorific prefixes.
    ATTY = 'Atty.'
    DET = 'Det.'
    HON = 'Hon.'
    DR = 'Dr.'
    MS = 'Ms.'

    # Roles.
    ATTORNEY = 'attorney'
    OFFICER = 'officer'
    EXPERT = 'expert'
    JUDGE = 'judge'
    SELF = 'self'
    GAL = 'gal'

    # Institutions.
    DCF = 'DCF'
    COURT = 'Court'
    POLICE = 'Police'
    TESTAMENT = 'Testament'

    # Document kinds.
    LETTER = 'letter'
    MESSAGE = 'message'
    MOTION = 'motion'
    ORDER = 'order'
    REPORT = 'report'
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,396
quantapix/qnarre
refs/heads/main
/qnarre/base/doc/ipython.py
# Copyright 2019 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= from traitlets.config.loader import Config from IPython.terminal.prompts import Prompts, Token from IPython.terminal.embed import InteractiveShellEmbed class Prompt(Prompts): def in_prompt_tokens(self, cli=None): return ( (Token.Prompt, 'In <'), (Token.PromptNum, str(self.shell.execution_count)), (Token.Prompt, '>: '), ) def out_prompt_tokens(self): return ( (Token.OutPrompt, 'Out<'), (Token.OutPromptNum, str(self.shell.execution_count)), (Token.OutPrompt, '>: '), ) try: get_ipython except NameError: nested = 0 cfg = Config() cfg.TerminalInteractiveShell.prompts_class = Prompt else: print("Running nested copies of IPython.") print("The prompts for the nested copy have been modified") cfg = Config() nested = 1 ipshell = InteractiveShellEmbed( config=cfg, banner1='Entering IPython', exit_msg='Exiting IPython...') ipshell2 = InteractiveShellEmbed(config=cfg, banner1='IPython again') # ipshell('***Called from top level. ' # 'Hit Ctrl-D to exit interpreter and continue program.\n' # 'Note that if you use %kill_embedded, you can fully deactivate\n' # 'This embedded instance so it will never turn on again') # usage: # from .ipython import ipshell # ipshell()
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,397
quantapix/qnarre
refs/heads/main
/qnarre/models/old/session.py
# Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= from datetime import datetime import pathlib as pth import datetime as dt from qnarre.core.models import pt from tensorboard.plugins import hparams from tensorboard.plugins.hparams import summary as tb_summary def session_for(ps, sid=None): if ps.predict_run: sess = eager_pred if ps.eager_mode else predict else: if ps.eval_only: sess = eager_eval if ps.eager_mode else evaluate else: sess = eager_train if ps.eager_mode else train sid = sid or datetime.now().strftime("%Y%m%d-%H%M%S") return lambda *args, **kw: sess(sid, ps, *args, **kw) TRAIN = "train" def eager_train(sid, ps, dset_fn, model_fn, cbacks=None): dset = dset_fn(ps, TRAIN) # dset_test = dset_fn(ps, 'test') model = model_fn(ps) def step(x, y): with tf.GradientTape() as tape: logits = model(x) loss = ps.losses(y, logits) loss += sum(model.losses) acc = ps.metrics(y, logits) grads = tape.gradient(loss, model.trainable_variables) ps.optimizer.apply_gradients(zip(grads, model.trainable_variables)) return loss, acc @tf.function def epoch(): s, loss, acc = 0, 0.0, 0.0 for x, y in dset: s += 1 loss, acc = step(x, y) if tf.equal(s % 10, 0): m = ps.metrics.result() tf.print("Step:", s, ", loss:", loss, ", acc:", m) return loss, acc for e in range(ps.train_epochs): loss, acc = epoch() print(f"Epoch {e} loss:", loss, ", acc:", acc) def train(sid, ps, 
dset_fn, model_fn, cbacks=None): ds = dset_fn(ps, TRAIN) # with T.distribute.MirroredStrategy().scope(): mdl = model_fn(ps, compiled=True) mp = pth.Path.cwd() / ps.dir_model / ps.model if mp.exists() and tf.get_checkpoint_state(str(mp)): mdl.train_on_batch(ds) mdl.load_weights(str(mp / TRAIN)) lp = pth.Path.cwd() / ps.dir_log / ps.model if lp.exists(): sumy = tf.create_file_writer(str(lp / TRAIN / sid)) sum_s = tb_summary.session_start_pb(hparams=ps.hparams) cbs = cbacks or [] if lp.exists(): cbs.append( tf.TensorBoard( log_dir=str(lp / TRAIN / sid), histogram_freq=1, embeddings_freq=0, update_freq="epoch", ) ) cbs.append(tf.EarlyStopping(monitor="val_loss", min_delta=1e-2, patience=2, verbose=True)) if mp.exists(): cbs.append( tf.ModelCheckpoint( str(mp / TRAIN), save_weights_only=True, # save_best_only=True, verbose=True, ) ) ds_test = dset_fn(ps, "test") hist = mdl.fit(ds, callbacks=cbs, epochs=ps.train_epochs, validation_data=ds_test) print(f"History: {hist.history}") sp = pth.Path.cwd() / ps.dir_save / ps.model if sp.exists(): tf.export_saved_model(mdl, str(sp)) loss, acc = mdl.evaluate(ds_test) print(f"\nEval loss, acc: {loss}, {acc}") """ with sumy.as_default(): e = tf.Event(summary=sum_s).SerializeToString() tf.import_event(e) tf.scalar('accuracy', acc, step=1, description='Accuracy') sum_e = tb_summary.session_end_pb(hparams.api_pb2.STATUS_SUCCESS) e = tf.Event(summary=sum_e).SerializeToString() tf.import_event(e) """ def evaluate(sid, ps, dset_fn, model_fn, cbacks=None): ds = dset_fn(ps, "test") # with T.distribute.MirroredStrategy().scope(): p = str(pth.Path.cwd() / ps.dir_save / ps.model) assert tf.contains_saved_model(p) mdl = tf.load_from_saved_model(p) loss, acc = mdl.evaluate(ds) print(f"\nEvaluate loss, acc: {loss}, {acc}") def predict(sid, ps, dset_fn, model_fn, cbacks=None): ds = dset_fn(ps, "try") # with T.distribute.MirroredStrategy().scope(): p = str(pth.Path.cwd() / ps.dir_save / ps.model) assert tf.contains_saved_model(p) m = 
tf.load_from_saved_model(p) def train_loop(params, model_fn, dset_fn, cbacks=None): ps = params nus = [16, 32, 512] drs = [0.1, 0.2] opts = ["adam", "sgd"] writer = tf.create_file_writer(ps.dir_log + "/train") with writer.as_default(): s = None # _to_summary_pb(nus, drs, opts) e = tf.Event(summary=s).SerializeToString() tf.import_event(e) for nu in nus: for dr in drs: for opt in opts: kw = {"num_units": nu, "drop_rate": dr, "optimizer": opt} sid = dt.datetime.now().strftime("%Y%m%d-%H%M%S") print(f"--- Running session {sid}:", kw) ps.update(**kw) train_sess(ps, model_fn, dset_fn, cbacks, sid=sid) return """ names = [str(i) for i in range(ps.num_classes)] labels = [lb.numpy() for _, lb in ds_test] def log_confusion_matrix(epoch, logs): preds = N.argmax(model.predict(ds_test), axis=1) cm = sklearn.metrics.confusion_matrix(labels, preds) img = _to_image(_to_plot(cm, names)) with writer.as_default(): T.summary.image('Confusion Matrix', img, step=epoch) cbacks = [ kcb.LambdaCallback(on_epoch_end=log_confusion_matrix), ] def log_confusion_matrix(epoch, logs): names = [str(i) for i in range(params.num_classes)] labels = [lb.numpy() for _, lb in ds_test] preds = N.argmax(model.predict(ds_test), axis=1) cm = sklearn.metrics.confusion_matrix(labels, preds) img = _to_image(_to_plot(cm, names)) with writer.as_default(): T.summary.image("Confusion Matrix", img, step=epoch) def _to_plot(cm, names): fig = plt.figure(figsize=(8, 8)) plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues) plt.title("Confusion Matrix") plt.colorbar() ticks = N.arange(len(names)) plt.xticks(ticks, names, rotation=45) plt.yticks(ticks, names) cm = N.around(cm.astype('float') / cm.sum(axis=1)[:, N.newaxis], decimals=2) threshold = cm.max() / 2. 
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): color = "white" if cm[i, j] > threshold else "black" plt.text(j, i, cm[i, j], horizontalalignment="center", color=color) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') return fig def _to_image(fig): buf = io.BytesIO() plt.savefig(buf, format='png') plt.close(fig) buf.seek(0) img = T.image.decode_png(buf.getvalue(), channels=4) img = T.expand_dims(img, 0) return img def _to_summary_pb(num_units_list, dropout_rate_list, optimizer_list): nus_val = struct_pb2.ListValue() nus_val.extend(num_units_list) drs_val = struct_pb2.ListValue() drs_val.extend(dropout_rate_list) opts_val = struct_pb2.ListValue() opts_val.extend(optimizer_list) return hparams.summary.experiment_pb( hparam_infos=[ hparams.api_pb2.HParamInfo(name='num_units', display_name='Number of units', type=hparams.api_pb2.DATA_TYPE_FLOAT64, domain_discrete=nus_val), hparams.api_pb2.HParamInfo(name='drop_rate', display_name='Dropout rate', type=hparams.api_pb2.DATA_TYPE_FLOAT64, domain_discrete=drs_val), hparams.api_pb2.HParamInfo(name='optimizer', display_name='Optimizer', type=hparams.api_pb2.DATA_TYPE_STRING, domain_discrete=opts_val) ], metric_infos=[ hparams.api_pb2.MetricInfo( name=hparams.api_pb2.MetricName(tag='accuracy'), display_name='Accuracy'), ]) def get_assignment_map_from_checkpoint(tvars, init_checkpoint): import re import collections as co assignment_map = {} initialized_variable_names = {} name_to_variable = co.OrderedDict() for var in tvars: name = var.name m = re.match("^(.*):\\d+$", name) if m is not None: name = m.group(1) name_to_variable[name] = var init_vars = T.train.list_variables(init_checkpoint) assignment_map = co.OrderedDict() for x in init_vars: (name, var) = (x[0], x[1]) if name not in name_to_variable: continue assignment_map[name] = name initialized_variable_names[name] = 1 initialized_variable_names[name + ":0"] = 1 return (assignment_map, initialized_variable_names) """
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,398
quantapix/qnarre
refs/heads/main
/qnarre/prep/config/rag.py
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

import copy

from ... import core as qc


class PreTrained(qc.PreTrained):
    """Configuration for RAG-style composed models.

    A RAG config wraps two sub-configs — a question encoder and a generator —
    plus retrieval hyper-parameters (``hs`` below).  Instances are typically
    built via :meth:`from_question_encoder_generator_configs` or loaded via
    :meth:`from_pretrained_question_encoder_generator`.
    """

    hs = qc.Hypers(
        [],
        dict(
            BOS=None,
            dataset_split="train",
            dataset="wiki_dpr",
            decoder_start_token_id=None,
            do_deduplication=True,
            do_marginalize=False,
            doc_sep=" // ",
            EOS=None,
            exclude_bos_score=False,
            forced_eos_token_id=None,
            index_name="compressed",
            index_path=None,
            is_composition=True,
            is_enc_dec=True,
            label_smoothing=0.0,
            max_combined_length=300,
            model_type="rag",
            n_docs=5,
            output_retrieved=False,
            PAD=None,
            passages_path=None,
            prefix=None,
            reduce_loss=False,
            retrieval_batch_size=8,
            retrieval_vector_size=768,
            s_vocab=None,
            title_sep=" / ",
            use_dummy_dataset=False,
            y_cache=True,
        ),
    )

    @classmethod
    def from_pretrained(cls, *args, **kw):
        # RAG composes two separately-initialized sub-models, so the fast-init
        # path (which assumes a single flat state dict) must be disabled.
        kw["_fast_init"] = False
        return super().from_pretrained(*args, **kw)

    @classmethod
    def from_pretrained_question_encoder_generator(
        cls,
        question_encoder_pretrained_model_name_or_path=None,
        generator_pretrained_model_name_or_path=None,
        retriever=None,
        **kw,
    ):
        """Instantiate a composed model from two pretrained checkpoints.

        Keyword arguments prefixed with ``question_encoder_`` / ``generator_``
        are routed (with the prefix stripped) to the respective sub-model;
        everything else is used for the composed config.
        """
        kw_question_encoder = {
            argument[len("question_encoder_") :]: value
            for argument, value in kw.items()
            if argument.startswith("question_encoder_")
        }
        kw_generator = {
            argument[len("generator_") :]: value
            for argument, value in kw.items()
            if argument.startswith("generator_")
        }
        # Remove the routed (prefixed) entries from the shared kw dict.
        for key in kw_question_encoder.keys():
            del kw["question_encoder_" + key]
        for key in kw_generator.keys():
            del kw["generator_" + key]
        question_encoder = kw_question_encoder.pop("model", None)
        if question_encoder is None:
            assert question_encoder_pretrained_model_name_or_path is not None
            from ..auto.modeling_auto import AutoModel

            if "config" not in kw_question_encoder:
                from ..auto.configuration_auto import AutoConfig

                question_encoder_config, kw_question_encoder = AutoConfig.from_pretrained(
                    question_encoder_pretrained_model_name_or_path,
                    **kw_question_encoder,
                    return_unused_kw=True,
                )
                kw_question_encoder["config"] = question_encoder_config
            question_encoder = AutoModel.from_pretrained(
                question_encoder_pretrained_model_name_or_path, **kw_question_encoder
            )
        generator = kw_generator.pop("model", None)
        if generator is None:
            assert generator_pretrained_model_name_or_path is not None
            from ..auto.modeling_auto import AutoModelForSeq2SeqLM

            if "config" not in kw_generator:
                from ..auto.configuration_auto import AutoConfig

                generator_config, kw_generator = AutoConfig.from_pretrained(
                    generator_pretrained_model_name_or_path,
                    **kw_generator,
                    return_unused_kw=True,
                )
                kw_generator["config"] = generator_config
            generator = AutoModelForSeq2SeqLM.from_pretrained(
                generator_pretrained_model_name_or_path, **kw_generator
            )
        config = kw.get("config", None)
        if config is None:
            # BUG FIX: the original called `RagConfig.from_question_encoder_generator_configs`,
            # but `RagConfig` is not defined anywhere in this module (NameError at
            # runtime).  The combined-config constructor is a classmethod of this
            # class, so build the config through `cls` instead.
            config = cls.from_question_encoder_generator_configs(
                question_encoder.config, generator.config, **kw
            )
        return cls(
            question_encoder=question_encoder,
            generator=generator,
            config=config,
            retriever=retriever,
        )

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config, generator_config, **kw
    ):
        """Build a composed config from two sub-configs (stored as plain dicts)."""
        return cls(
            question_encoder=question_encoder_config.to_dict(),
            generator=generator_config.to_dict(),
            **kw,
        )

    def to_dict(self):
        """Serialize to a plain dict, recursing into both sub-configs."""
        y = copy.deepcopy(self.__dict__)
        y["question_encoder"] = self.question_encoder.to_dict()
        y["generator"] = self.generator.to_dict()
        y["model_type"] = self.__class__.model_type
        return y
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,399
quantapix/qnarre
refs/heads/main
/qnarre/prep/config/funnel.py
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

import math

from ... import core as qc


class PreTrained(qc.PreTrained):
    """Configuration and weight-initialization rules for Funnel Transformer."""

    hs = qc.Hypers(
        [],
        dict(
            act="gelu_new",
            attention_type="relative_shift",
            block_repeats=None,
            block_sizes=[4, 4, 4],
            d_head=64,
            d_inner=3072,
            d_model=768,
            drop_act=0.0,
            drop_attn=0.1,
            drop=0.1,
            eps=1e-9,
            init_range=0.1,
            initializer_std=None,
            model_type="funnel",
            n_dec_lays=2,
            n_heads=12,
            n_pos=512,
            n_typ=3,
            pool_q_only=True,
            pooling_type="mean",
            s_vocab=30522,
            separate_cls=True,
            truncate_seq=True,
        ),
    )

    def _init_weights(self, module):
        """Initialize `module`'s weights according to its (class-name-matched) kind.

        Linear layers get a normal init with std derived from fan-in/fan-out
        (Xavier-style) unless `initializer_std` overrides it; the relative
        attention biases/kernels get a uniform init in `[0, init_range]`.
        """
        classname = module.__class__.__name__
        if classname.find("Linear") != -1:
            if getattr(module, "weight", None) is not None:
                if self.config.initializer_std is None:
                    fan_out, fan_in = module.weight.shape
                    # BUG FIX: original used `np.sqrt`, but numpy is never
                    # imported in this file (NameError).  math.sqrt is the
                    # stdlib equivalent for this scalar computation.
                    std = math.sqrt(1.0 / float(fan_in + fan_out))
                else:
                    std = self.config.initializer_std
                qc.init.normal_(module.weight, std=std)
            if getattr(module, "bias", None) is not None:
                qc.init.constant_(module.bias, 0.0)
        elif classname == "FunnelRelMultiheadAttention":
            qc.init.uniform_(module.r_w_bias, b=self.config.init_range)
            qc.init.uniform_(module.r_r_bias, b=self.config.init_range)
            qc.init.uniform_(module.r_kernel, b=self.config.init_range)
            qc.init.uniform_(module.r_s_bias, b=self.config.init_range)
            qc.init.uniform_(module.seg_embed, b=self.config.init_range)
        elif classname == "FunnelEmbeddings":
            std = 1.0 if self.config.initializer_std is None else self.config.initializer_std
            qc.init.normal_(module.word_embeddings.weight, std=std)
            if module.word_embeddings.padding_idx is not None:
                # BUG FIX: the guard checks `word_embeddings.padding_idx` but the
                # original indexed with `module.padding_idx` (an attribute the
                # embeddings wrapper is never shown to have); zero the row at the
                # same padding_idx that was tested.
                module.word_embeddings.weight.data[module.word_embeddings.padding_idx].zero_()


MAP = {
    "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
    "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
    "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
    "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
    "funnel-transformer/intermediate": "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json",
    "funnel-transformer/intermediate-base": "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json",
    "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
    "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
    "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
    "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,400
quantapix/qnarre
refs/heads/main
/tools/triton/python/triton/ops/blocksparse/__init__.py
from .matmul import matmul from .softmax import softmax __all__ = [ "matmul", "softmax", ]
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,401
quantapix/qnarre
refs/heads/main
/qnarre/prep/config/mbart.py
# Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import torch from collections import OrderedDict from ... import core as qc class PreTrained(qc.PreTrained): hs = qc.Hypers( [], dict( act_fun="gelu", BOS=0, d_dec_ffn=4096, d_enc_ffn=4096, d_model=1024, drop_act=0.0, drop_attn=0.0, drop_dec=0.0, drop_enc=0.0, drop_proj=0.0, drop=0.1, EOS=2, forced_EOS=2, grad_checkpoint=True, init_std=0.02, is_enc_dec=True, model_type="mbart", n_dec_heads=16, n_dec_lays=12, n_enc_heads=16, n_enc_lays=12, n_pos=1024, PAD=1, s_vocab=50265, scale=False, y_cache=True, ), ) def _init_weights(self, module): std = self.cfg.init_std if isinstance(module, qc.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, qc.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _set_grad_checkpoint(self, module, value=False): if isinstance(module, (MBartDecoder, MBartDecoder)): module.grad_checkpoint = value @property def dummy_inputs(self): pad = self.cfg.PAD input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad]], device=self.device) dummy_inputs = { "mask": input_ids.ne(pad), "input_ids": input_ids, } return dummy_inputs MAP = { "facebook/mbart-large-cc25": dict( add_bias_logits=False, add_final_norm=True, 
archs=["MBartForConditionalGeneration"], id2label={"0": "LABEL_0", "1": "LABEL_1", "2": "LABEL_2"}, label2id={"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}, max_len=1024, n_beams=5, n_lays=12, normalize_embedding=True, num_labels=3, pre_norm=True, s_vocab=250027, scale=True, static_position_embeddings=False, task_params={"translation_en_to_ro": {"dec_START": 250020}}, y_prev=True, ), } class Onnx: @property def inputs(self): if self.task in ["default", "seq2seq-lm"]: y = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: y["decoder_input_ids"] = {0: "batch"} y["dec_m"] = { 0: "batch", 1: "past_decoder_sequence + sequence", } else: y["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"} y["dec_m"] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(y, direction="inputs") elif self.task == "causal-lm": y = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: n_enc_lays, _ = self.n_lays for i in range(n_enc_lays): y[f"prev_kv.{i}.key"] = { 0: "batch", 2: "past_sequence + sequence", } y[f"prev_kv.{i}.value"] = { 0: "batch", 2: "past_sequence + sequence", } else: y = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("dec_m", {0: "batch", 1: "decoder_sequence"}), ] ) return y @property def outputs(self): if self.task in ["default", "seq2seq-lm"]: y = super().outputs else: y = super().outputs if self.use_past: n_enc_lays, _ = self.n_lays for i in range(n_enc_lays): y[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} y[f"present.{i}.value"] = { 0: "batch", 2: "past_sequence + sequence", } return y def _generate_dummy_inputs_for_default_and_seq2seq_lm( self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ): 
encoder_inputs = ( self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, seq_length, is_pair, framework ) ) decoder_seq_length = seq_length if not self.use_past else 1 decoder_inputs = ( self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, decoder_seq_length, is_pair, framework ) ) decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} y = dict(**encoder_inputs, **decoder_inputs) if self.use_past: batch, encoder_seq_length = y["input_ids"].shape decoder_seq_length = y["decoder_input_ids"].shape[1] num_encoder_attention_heads, num_decoder_attention_heads = self.n_heads encoder_shape = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.d_model // num_encoder_attention_heads, ) decoder_past_length = decoder_seq_length + 3 decoder_shape = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.d_model // num_decoder_attention_heads, ) y["dec_m"] = torch.cat( [y["dec_m"], torch.ones(batch, decoder_past_length)], dim=1, ) y["prev_kv"] = [] n_enc_lays, n_dec_lays = self.n_lays min_num_layers = min(n_enc_lays, n_dec_lays) max_num_layers = max(n_enc_lays, n_dec_lays) - min_num_layers remaining_side_name = "encoder" if n_enc_lays > n_dec_lays else "decoder" for _ in range(min_num_layers): y["prev_kv"].append( ( torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape), ) ) shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(min_num_layers, max_num_layers): y["prev_kv"].append((torch.zeros(shape), torch.zeros(shape))) return y def _generate_dummy_inputs_for_causal_lm( self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ): y = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, seq_length, is_pair, framework ) if self.use_past: batch, seqlen = 
y["input_ids"].shape past_key_values_length = seqlen + 2 n_enc_lays, _ = self.n_lays num_encoder_attention_heads, _ = self.n_heads past_shape = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.d_model // num_encoder_attention_heads, ) y["mask"] = torch.cat([y["mask"], torch.ones(batch, past_key_values_length)], dim=1) y["prev_kv"] = [ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(n_enc_lays) ] return y def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ): batch_size = compute_effective_axis_dimension( batch_size, fixed_dimension=OnnxConfig.DEFAULT_FIXED_BATCH, num_token_to_add=0 ) token_to_add = tokenizer.num_special_tokens_to_add(is_pair) seq_length = compute_effective_axis_dimension( seq_length, fixed_dimension=OnnxConfig.DEFAULT_FIXED_SEQUENCE, num_token_to_add=token_to_add, ) dummy_input = [" ".join([tokenizer.unk]) * seq_length] * batch_size common_inputs = dict(tokenizer(dummy_input, return_tensors=framework)) return common_inputs def generate_dummy_inputs( self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ): if self.task in ["default", "seq2seq-lm"]: y = self._generate_dummy_inputs_for_default_and_seq2seq_lm( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework, ) elif self.task == "causal-lm": y = self._generate_dummy_inputs_for_causal_lm( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework, ) else: y = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework, ) return y def _flatten_past_key_values_(self, flattened_output, name, idx, t): if self.task in ["default", "seq2seq-lm"]: flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t) else: 
flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,402
quantapix/qnarre
refs/heads/main
/qnarre/run/swag.py
# Copyright 2021 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# fine-tune for multiple choice

import logging
import random

import torch

from dataclasses import dataclass
from datasets import load_metric
from itertools import chain
from torch.utils.data import DataLoader

# NOTE(review): "ChoicepleChoice" looks like a mechanical rename of
# "MultipleChoice" — confirm these symbols exist in the pinned transformers fork.
from transformers import default_data_collator, AutoModelForChoicepleChoice, PreTrainedTokenizerBase

from .params import TRAIN, EVAL, ALL, EACH
from .runner import Runner as Base

log = logging.getLogger(__name__)


@dataclass
class DataCollatorForChoicepleChoice:
    """Pads a batch of multiple-choice features and regroups them as (batch, choices, seq).

    Each incoming feature holds `choices` tokenized sequences per key plus a
    gold label under "label" or "labels".
    """

    tokenizer: PreTrainedTokenizerBase
    # BUG FIX: these three were bare class attributes (no annotation), so the
    # dataclass-generated __init__ accepted only `tokenizer`, and the
    # `pad_to_multiple_of=...` keyword passed in Runner.loaders raised a
    # TypeError.  Annotating them makes them real dataclass fields.
    # (String annotations: dataclasses only need the annotation's presence.)
    padding: "bool | str" = True
    max_len: "int | None" = None
    pad_to_multiple_of: "int | None" = None

    def __call__(self, xs):
        # The gold answer may be stored under either key depending on the dataset.
        label_name = "label" if "label" in xs[0].keys() else "labels"
        labels = [x.pop(label_name) for x in xs]
        size = len(xs)
        choices = len(xs[0]["input_ids"])
        # Flatten (batch, choices) into batch*choices plain sequences so the
        # tokenizer can pad them uniformly.
        ys = [[{k: v[i] for k, v in x.items()} for i in range(choices)] for x in xs]
        ys = list(chain(*ys))
        ys = self.tokenizer.pad(
            ys,
            padding=self.padding,
            max_len=self.max_len,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten back to (batch, choices, seq).
        ys = {k: v.view(size, choices, -1) for k, v in ys.items()}
        ys["labels"] = torch.tensor(labels, dtype=torch.int64)
        return ys


class Runner(Base):
    """Fine-tuning runner for multiple-choice tasks (SWAG-style)."""

    AutoModel = AutoModelForChoicepleChoice

    @property
    def cols(self):
        """Lazily resolve dataset column names: ALL columns plus the
        (endings, context, question, label) tuple under EACH."""
        if self._cols is None:
            ds = self.dataset
            if ds[TRAIN] is not None:
                cs = ds[TRAIN].column_names
            else:
                cs = ds[EVAL].column_names
            # SWAG schema: four candidate endings, a context and a question stem.
            e = [f"ending{x}" for x in range(4)]
            c = "sent1"
            q = "sent2"
            l = "label" if "label" in cs else "labels"
            self._cols = {ALL: cs, EACH: [e, c, q, l]}
        return self._cols

    @property
    def train_ds(self):
        """Tokenize (once, main process first), truncate to max_train_samples,
        and log a few random samples."""
        if self._train_ds is None:
            ps, mgr, ds = self.params, self.mgr, self.dataset
            with mgr.main_process_first():
                self._dataset = y = ds.map(
                    self.prep_for_train,
                    batched=True,
                    remove_columns=self.cols[ALL],
                    desc="Running tokenizer on dataset",
                )
            y = y[TRAIN]
            if ps.max_train_samples is not None:
                y = y.select(range(ps.max_train_samples))
            # BUG FIX: random.sample raises ValueError when the population is
            # smaller than the sample size; clamp for tiny datasets.
            for i in random.sample(range(len(y)), min(3, len(y))):
                log.info(f"Sample {i} of the training set: {y[i]}")
            self._train_ds = y
        return self._train_ds

    def prep_for_train(self, xs):
        """Batched map function: expand each example into its 4 (context,
        question+ending) pairs, tokenize, and regroup 4-by-4."""
        ps = self.params
        e_col, c_col, q_col, l_col = self.cols[EACH]
        # Repeat each context once per candidate ending.
        firsts = [[x] * 4 for x in xs[c_col]]
        qs = xs[q_col]
        seconds = [[f"{q} {xs[x][i]}" for x in e_col] for i, q in enumerate(qs)]
        firsts = list(chain(*firsts))
        seconds = list(chain(*seconds))
        ys = self.tokenizer(
            firsts,
            seconds,
            max_len=ps.max_len,
            padding=self.padding,
            truncation=True,
        )
        # Regroup the flat tokenizer output into per-example 4-tuples.
        ys = {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in ys.items()}
        ys["labels"] = xs[l_col]
        return ys

    @property
    def loaders(self):
        """Build train/eval DataLoaders; pad dynamically unless params ask for
        fixed-length padding (then the default collator suffices)."""
        if self._loaders is None:
            ps, mgr = self.params, self.mgr
            if ps.pad_to_max_length:
                c = default_data_collator
            else:
                # Multiples of 8 enable tensor-core-friendly shapes under fp16.
                c = DataCollatorForChoicepleChoice(
                    self.tokenizer, pad_to_multiple_of=(8 if mgr.use_fp16 else None)
                )
            t = DataLoader(
                self.train_ds, shuffle=True, collate_fn=c, batch_size=ps.train_batch_size
            )
            e = DataLoader(self.eval_ds, collate_fn=c, batch_size=ps.eval_batch_size)
            self._loaders = {TRAIN: t, EVAL: e}
        return self._loaders

    @property
    def metric(self):
        if self._metric is None:
            self._metric = load_metric("accuracy")
        return self._metric

    def eval_epoch(self, e):
        """Run one evaluation pass, gathering predictions across processes."""
        m, mgr = self.model, self.mgr
        m.eval()
        for xs in self.loaders[EVAL]:
            with torch.no_grad():
                ys = m(**xs)
            ys = ys.logits.argmax(dim=-1)
            self.metric.add_batch(predictions=mgr.gather(ys), references=mgr.gather(xs["labels"]))
        y = self.metric.compute()
        mgr.print(f"epoch {e}: {y}")


def main():
    x = Runner()
    # Touch the lazy properties in dependency order before training.
    x.dataset
    x.config
    x.tokenizer
    x.model
    x.model.resize_token_embeddings(len(x.tokenizer))
    x.loaders
    x.prepare()
    x.train()
    x.save()


if __name__ == "__main__":
    main()


"""
accelerate launch swag.py \
--model_name bert-base-uncased \
--dataset_name swag \
--out_dir /tmp/test-swag-no-trainer \
--pad_to_max_length
"""
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,403
quantapix/qnarre
refs/heads/main
/qnarre/base/doc/analyzer.py
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

from .counter import counters
from .contain import Contains


class Analyzer:
    """Runs sanity and coherence passes over a message source, reporting
    progress and tallies through the `counters` context manager."""

    # ((label, progress-char) pairs, heading) consumed by `counters`.
    san_args = ((('passed', '.'), ('failed', 'F')), 'Sanity:')

    def check_sanity(self, src, **kw):
        """Open a sanity counter set and return it to the caller."""
        with counters(self.san_args, kw) as cs:
            return cs

    coh_args = ((('record', ''), ('purged', 'd'), ('equal', '='),
                 ('full', '<'), ('partial', '~')), 'Coherence:')

    def check_coherence(self, src, **kw):
        """Grow containment graphs from `src`, print every mutually-containing
        record pair, and return the counter set."""
        gs = Contains()
        with counters(self.coh_args, kw) as cs:
            gs.grow_from(src, **kw)
            mg, fg = gs.record, gs.full
            for node in sorted(mg.nodes()):
                if node not in fg:
                    continue
                for peer in fg.successors(node):
                    # A mutual full-containment edge node<->peer flags two
                    # records that each fully contain the other.
                    if peer in fg and node in fg.successors(peer):
                        print(node, peer)
                        # NOTE(review): `.node` is the legacy networkx (<2.4)
                        # accessor — presumably pinned; confirm the version.
                        print(mg.node[node]['nominal'][:30],
                              mg.node[peer]['nominal'][:30])
            return cs
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,404
quantapix/qnarre
refs/heads/main
/tools/triton/python/tutorials/03-matrix-multiplication.py
""" Matrix Multiplication ===================== In this tutorial, you will write a very short high-performance FP16 matrix multiplication kernel that achieves performance on parallel with cuBLAS. You will specifically learn about: * Block-level matrix multiplications. * Multi-dimensional pointer arithmetics. * Program re-ordering for improved L2 cache hit rate. * Automatic performance tuning. """ # %% # Motivations # ----------- # # Matrix multiplications are a key building block of most modern high-performance computing systems. # They are notoriously hard to optimize, hence their implementation is generally done by # hardware vendors themselves as part of so-called "kernel libraries" (e.g., cuBLAS). # Unfortunately, these libraries are often proprietary and cannot be easily customized # to accommodate the needs of modern deep learning workloads (e.g., fused activation functions). # In this tutorial, you will learn how to implement efficient matrix multiplications by # yourself with Triton, in a way that is easy to customize and extend. # # Roughly speaking, the kernel that we will write will implement the following blocked # algorithm to multiply a (M, K) by a (K, N) matrix: # # .. code-block:: python # # # Do in parallel # for m in range(0, M, BLOCK_SIZE_M): # # Do in parallel # for n in range(0, N, BLOCK_SIZE_N): # acc = zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=float32) # for k in range(0, K, BLOCK_SIZE_K): # a = A[m : m+BLOCK_SIZE_M, k : k+BLOCK_SIZE_K] # b = B[k : k+BLOCK_SIZE_K, n : n+BLOCK_SIZE_N] # acc += dot(a, b) # C[m : m+BLOCK_SIZE_M, n : n+BLOCK_SIZE_N] = acc # # where each iteration of the doubly-nested for-loop is performed by a dedicated Triton program instance. # %% # Compute Kernel # -------------- # # The above algorithm is, actually, fairly straightforward to implement in Triton. # The main difficulty comes from the computation of the memory locations at which blocks # of :code:`A` and :code:`B` must be read in the inner loop. 
For that, we need # multi-dimensional pointer arithmetics. # # Pointer Arithmetics # ~~~~~~~~~~~~~~~~~~~ # # For a row-major 2D tensor :code:`X`, the memory location of :code:`X[i, j]` is given b # y :code:`&X[i, j] = X + i*stride_xi + j*stride_xj`. # Therefore, blocks of pointers for :code:`A[m : m+BLOCK_SIZE_M, k:k+BLOCK_SIZE_K]` and # :code:`B[k : k+BLOCK_SIZE_K, n : n+BLOCK_SIZE_N]` can be defined in pseudo-code as: # # .. code-block:: python # # &A[m : m+BLOCK_SIZE_M, k:k+BLOCK_SIZE_K] = a_ptr + (m : m+BLOCK_SIZE_M)[:, None]*A.stride(0) + (k : k+BLOCK_SIZE_K)[None, :]*A.stride(1); # &B[k : k+BLOCK_SIZE_K, n:n+BLOCK_SIZE_N] = b_ptr + (k : k+BLOCK_SIZE_K)[:, None]*B.stride(0) + (n : n+BLOCK_SIZE_N)[None, :]*B.stride(1); # # Which means that pointers for blocks of A and B can be initialized (i.e., :code:`k=0`) in Triton as the following # code. Also note that we need an extra modulo to handle the case where :code:`M` is not a multiple of # :code:`BLOCK_SIZE_M` or :code:`N` is not a multiple of :code:`BLOCK_SIZE_N`, in which case we can pad the data with # some useless values, which will not contribute to the results. For the :code:`K` dimension, we will handle that later # using masking load semantics. # # .. code-block:: python # # offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M # offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N # offs_k = tl.arange(0, BLOCK_SIZE_K) # a_ptrs = a_ptr + (offs_am[:, None]*stride_am + offs_k [None, :]*stride_ak) # b_ptrs = b_ptr + (offs_k [:, None]*stride_bk + offs_bn[None, :]*stride_bn) # # And then updated in the inner loop as follows: # # .. code-block:: python # # a_ptrs += BLOCK_SIZE_K * stride_ak; # b_ptrs += BLOCK_SIZE_K * stride_bk; # # # L2 Cache Optimizations # ~~~~~~~~~~~~~~~~~~~~~~ # # As mentioned above, each program instance computes a :code:`[BLOCK_SIZE_M, BLOCK_SIZE_N]` # block of :code:`C`. 
# It is important to remember that the order in which these blocks are computed does # matter, since it affects the L2 cache hit rate of our program. and unfortunately, a # a simple row-major ordering # # .. code-block:: Python # # pid = triton.program_id(0); # grid_m = (M + BLOCK_SIZE_M - 1) // BLOCK_SIZE_M; # grid_n = (N + BLOCK_SIZE_N - 1) // BLOCK_SIZE_N; # pid_m = pid / grid_n; # pid_n = pid % grid_n; # # is just not going to cut it. # # One possible solution is to launch blocks in an order that promotes data reuse. # This can be done by 'super-grouping' blocks in groups of :code:`GROUP_M` rows before # switching to the next column: # # .. code-block:: python # # # Program ID # pid = tl.program_id(axis=0) # # Number of program ids along the M axis # num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) # # Number of programs ids along the N axis # num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) # # Number of programs in group # num_pid_in_group = GROUP_SIZE_M * num_pid_n # # Id of the group this program is in # group_id = pid // num_pid_in_group # # Row-id of the first program in the group # first_pid_m = group_id * GROUP_SIZE_M # # If `num_pid_m` isn't divisible by `GROUP_SIZE_M`, the last group is smaller # group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) # # *Within groups*, programs are ordered in a column-major order # # Row-id of the program in the *launch grid* # pid_m = first_pid_m + (pid % group_size_m) # # Col-id of the program in the *launch grid* # pid_n = (pid % num_pid_in_group) // group_size_m # # For example, in the following matmul where each matrix is 9 blocks by 9 blocks, # we can see that if we compute the output in row-major ordering, we need to load 90 # blocks into SRAM to compute the first 9 output blocks, but if we do it in grouped # ordering, we only need to load 54 blocks. # # .. 
image:: grouped_vs_row_major_ordering.png # # In practice, this can improve the performance of our matrix multiplication kernel by # more than 10\% on some hardware architecture (e.g., 220 to 245 TFLOPS on A100). # # %% # Final Result # ------------ import torch import triton import triton.language as tl # `triton.jit`'ed functions can be auto-tuned by using the `triton.autotune` decorator, which consumes: # - A list of `triton.Config` objects that define different configurations of # meta-parameters (e.g., `BLOCK_SIZE_M`) and compilation options (e.g., `num_warps`) to try # - An auto-tuning *key* whose change in values will trigger evaluation of all the # provided configs @triton.autotune( configs=[ triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2), triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2), ], key=['M', 'N', 'K'], ) @triton.jit def matmul_kernel( # Pointers to matrices a_ptr, b_ptr, c_ptr, # Matrix dimensions M, N, K, # The stride variables represent how much to increase the ptr by when moving by 1 # element in a particular dimension. E.g. 
`stride_am` is how much to increase `a_ptr` # by to get the element one row down (A has M rows). stride_am, stride_ak, stride_bk, stride_bn, stride_cm, stride_cn, # Meta-parameters BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr, ACTIVATION: tl.constexpr, ): """Kernel for computing the matmul C = A x B. A has shape (M, K), B has shape (K, N) and C has shape (M, N) """ # ----------------------------------------------------------- # Map program ids `pid` to the block of C it should compute. # This is done in a grouped ordering to promote L2 data reuse. # See above `L2 Cache Optimizations` section for details. pid = tl.program_id(axis=0) num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) num_pid_in_group = GROUP_SIZE_M * num_pid_n group_id = pid // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + (pid % group_size_m) pid_n = (pid % num_pid_in_group) // group_size_m # ---------------------------------------------------------- # Create pointers for the first blocks of A and B. # We will advance this pointer as we move in the K direction # and accumulate # `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers # `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers # See above `Pointer Arithmetics` section for details offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N offs_k = tl.arange(0, BLOCK_SIZE_K) a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak) b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn) # ----------------------------------------------------------- # Iterate to compute a block of the C matrix. # We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block # of fp32 values for higher accuracy. 
# `accumulator` will be converted back to fp16 after the loop. accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)): # Load the next block of A and B, generate a mask by checking the K dimension. # If it is out of bounds, set it to 0. a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) # We accumulate along the K dimension. accumulator += tl.dot(a, b) # Advance the ptrs to the next K block. a_ptrs += BLOCK_SIZE_K * stride_ak b_ptrs += BLOCK_SIZE_K * stride_bk # You can fuse arbitrary activation functions here # while the accumulator is still in FP32! if ACTIVATION == "leaky_relu": accumulator = leaky_relu(accumulator) c = accumulator.to(tl.float16) # ----------------------------------------------------------- # Write back the block of the output matrix C with masks. offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :] c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N) tl.store(c_ptrs, c, mask=c_mask) # We can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `_matmul`. @triton.jit def leaky_relu(x): x = x + 1 return tl.where(x >= 0, x, 0.01 * x) # %% # We can now create a convenience wrapper function that only takes two input tensors, # and (1) checks any shape constraint; (2) allocates the output; (3) launches the above kernel. def matmul(a, b, activation=""): # Check constraints. assert a.shape[1] == b.shape[0], "Incompatible dimensions" assert a.is_contiguous(), "Matrix A must be contiguous" assert b.is_contiguous(), "Matrix B must be contiguous" M, K = a.shape K, N = b.shape # Allocates output. c = torch.empty((M, N), device=a.device, dtype=a.dtype) # 1D launch kernel where each block gets its own program. 
grid = lambda META: ( triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']), ) matmul_kernel[grid]( a, b, c, M, N, K, a.stride(0), a.stride(1), b.stride(0), b.stride(1), c.stride(0), c.stride(1), ACTIVATION=activation ) return c # %% # Unit Test # --------- # # We can test our custom matrix multiplication operation against a native torch implementation (i.e., cuBLAS). torch.manual_seed(0) a = torch.randn((512, 512), device='cuda', dtype=torch.float16) b = torch.randn((512, 512), device='cuda', dtype=torch.float16) triton_output = matmul(a, b) torch_output = torch.matmul(a, b) print(f"triton_output={triton_output}") print(f"torch_output={torch_output}") if torch.allclose(triton_output, torch_output, atol=1e-2, rtol=0): print("✅ Triton and Torch match") else: print("❌ Triton and Torch differ") # %% # Benchmark # --------- # # Square Matrix Performance # ~~~~~~~~~~~~~~~~~~~~~~~~~~ # # We can now compare the performance of our kernel against that of cuBLAS. Here we focus on square matrices, # but feel free to arrange this script as you wish to benchmark any other matrix shape. @triton.testing.perf_report( triton.testing.Benchmark( x_names=['M', 'N', 'K'], # Argument names to use as an x-axis for the plot x_vals=[ 128 * i for i in range(2, 33) ], # Different possible values for `x_name` line_arg='provider', # Argument name whose value corresponds to a different line in the plot # Possible values for `line_arg` line_vals=['cublas', 'triton'], # Label name for the lines line_names=["cuBLAS", "Triton"], # Line styles styles=[('green', '-'), ('blue', '-')], ylabel="TFLOPS", # Label name for the y-axis plot_name="matmul-performance", # Name for the plot, used also as a file name for saving the plot. 
args={}, ) ) def benchmark(M, N, K, provider): a = torch.randn((M, K), device='cuda', dtype=torch.float16) b = torch.randn((K, N), device='cuda', dtype=torch.float16) quantiles = [0.5, 0.2, 0.8] if provider == 'cublas': ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.matmul(a, b), quantiles=quantiles) if provider == 'triton': ms, min_ms, max_ms = triton.testing.do_bench(lambda: matmul(a, b), quantiles=quantiles) perf = lambda ms: 2 * M * N * K * 1e-12 / (ms * 1e-3) return perf(ms), perf(max_ms), perf(min_ms) benchmark.run(show_plots=True, print_data=True)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,405
quantapix/qnarre
refs/heads/main
/qnarre/prep/tokens/mpnet.py
# Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import collections import os import unicodedata from ...tokens.utils import ( AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace, ) VOCAB_FS = {"vocab_file": "vocab.txt"} VOCAB_MAP = { "vocab_file": { "microsoft/mpnet-base": "https://huggingface.co/microsoft/mpnet-base/resolve/main/vocab.txt", } } INPUT_CAPS = { "microsoft/mpnet-base": 512, } PRETRAINED_INIT_CONFIGURATION = { "microsoft/mpnet-base": {"do_lower_case": True}, } def load_vocab(vocab_file): vocab = collections.OrderedDict() with open(vocab_file, "r", encoding="utf-8") as reader: tokens = reader.readlines() for index, token in enumerate(tokens): token = token.rstrip("\n") vocab[token] = index return vocab def whitespace_tokenize(text): text = text.strip() if not text: return [] tokens = text.split() return tokens class Tokenizer(PreTrainedTokenizer): vocab_fs = VOCAB_FS vocab_map = VOCAB_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION input_caps = INPUT_CAPS model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, bos="<s>", eos="</s>", sep="</s>", cls="<s>", unk="[UNK]", pad="<pad>", msk="<mask>", tokenize_chinese_chars=True, strip_accents=None, **kw, ): bos = AddedToken(bos, lstrip=False, rstrip=False) 
if isinstance(bos, str) else bos eos = AddedToken(eos, lstrip=False, rstrip=False) if isinstance(eos, str) else eos sep = AddedToken(sep, lstrip=False, rstrip=False) if isinstance(sep, str) else sep cls = AddedToken(cls, lstrip=False, rstrip=False) if isinstance(cls, str) else cls unk = AddedToken(unk, lstrip=False, rstrip=False) if isinstance(unk, str) else unk pad = AddedToken(pad, lstrip=False, rstrip=False) if isinstance(pad, str) else pad msk = AddedToken(msk, lstrip=True, rstrip=False) if isinstance(msk, str) else msk super().__init__( do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, bos=bos, eos=eos, unk=unk, sep=sep, cls=cls, pad=pad, msk=msk, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kw, ) if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained " "model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict( [(ids, tok) for tok, ids in self.vocab.items()] ) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, ) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk=self.unk) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def s_vocab(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = 
self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): return self.vocab.get(token, self.vocab.get(self.unk)) def _convert_id_to_token(self, index): return self.ids_to_tokens.get(index, self.unk) def convert_tokens_to_string(self, tokens): out_string = " ".join(tokens).replace(" ##", "").strip() return out_string def build_inputs_with_special_tokens(self, toks_0, toks_1=None): if toks_1 is None: return [self.cls_token_id] + toks_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + toks_0 + sep + sep + toks_1 + sep def get_special_tokens_mask( self, toks_0, toks_1=None, has_specials=False, ): if has_specials: return super().get_special_tokens_mask(toks_0=toks_0, toks_1=toks_1, has_specials=True) if toks_1 is None: return [1] + ([0] * len(toks_0)) + [1] return [1] + ([0] * len(toks_0)) + [1, 1] + ([0] * len(toks_1)) + [1] def create_token_type_ids_from_sequences(self, toks_0, toks_1=None): sep = [self.sep_token_id] cls = [self.cls_token_id] if toks_1 is None: return len(cls + toks_0 + sep) * [0] return len(cls + toks_0 + sep + sep + toks_1 + sep) * [0] def save_vocabulary(self, dir, pre=None): index = 0 if os.path.isdir(dir): vocab_file = os.path.join( dir, (pre + "-" if pre else "") + VOCAB_FS["vocab_file"], ) else: vocab_file = (pre + "-" if pre else "") + dir with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" 
) index = token_index writer.write(token + "\n") index += 1 return (vocab_file,) class BasicTokenizer(object): def __init__( self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None ): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents def tokenize(self, text, never_split=None): never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): if never_split is not None and text in never_split: return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): if ( (cp >= 
0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def _clean_text(self, text): output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer(object): def __init__(self, vocab, unk, max_input_chars_per_word=100): self.vocab = vocab self.unk = unk self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk) else: output_tokens.extend(sub_tokens) return output_tokens
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,406
quantapix/qnarre
refs/heads/main
/qnarre/prep/config/gpt_neo.py
# Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= from ... import core as qc class PreTrained(qc.PreTrained): hs = qc.Hypers( [], dict( act="gelu_new", attention_types=[[["global", "local"], 12]], BOS=50256, d_ff=None, d_hidden=2048, drop_attn=0.0, drop_embed=0.0, drop_resid=0.0, drop_sum_first=0.1, EOS=50256, eps=1e-5, init_range=0.02, model_type="gpt_neo", n_heads=16, n_lays=24, n_pos=2048, s_vocab=50257, s_win=256, sum_act=None, sum_proj=True, sum_type="cls_index", sum_use_proj=True, y_cache=True, ), ) @staticmethod def expand_attention_types_params(attention_types): attentions = [] for item in attention_types: for _ in range(item[1]): attentions.extend(item[0]) return attentions def _init_weights(self, module): if isinstance(module, (qc.Linear,)): module.weight.data.normal_(mean=0.0, std=self.config.init_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, qc.Embed): module.weight.data.normal_(mean=0.0, std=self.config.init_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, qc.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, GPTNeoModel): module.gradient_checkpointing = value def custom_unfold(input, dimension, size, step): import torch shape = 
input.size() rank = len(shape) sizedim = shape[dimension] low_indices = torch.arange(0, sizedim, step) min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1 indices = torch.arange(size) + low_indices[:min_length][:, None] s = [slice(None)] * rank s[dimension] = indices sliced = input[s] perm = list(range(0, rank + 1)) perm.append(perm.pop(dimension + 1)) return sliced.permute(perm) def custom_get_block_length_and_num_blocks(seq_length, s_win): import torch candidates = torch.arange(1, s_win) remainders = torch.remainder(seq_length, candidates) divisor_indices = remainders == 0 divisors = candidates[divisor_indices] largest_divisor = torch.max(divisors) return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor") GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = { "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json", }
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,407
quantapix/qnarre
refs/heads/main
/qnarre/base/doc/base.py
# Copyright 2019 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import os import re import sys import pathlib as pth import collections as co from hashlib import blake2b def num_to_name(n): return '{:0>3d}0'.format(n) def digest(value): return blake2b(value, digest_size=20).hexdigest() def rst_def(pref, name): return '\n.. _{0}/{1}:\n\n{1}\n{2}\n'.format(pref, name, '=' * len(name)) def rst_ref(pref, name): return ':ref:`{}/{}`'.format(pref, name) def lister(path, rng=(), suffs=('.png', '.jpg', '.mov')): with os.scandir(path) as es: for e in es: p = pth.Path(e.path) if p.is_file(): if p.suffix in suffs and (not rng or p.stem in rng): yield p elif p.is_dir(): yield from lister(p, rng, suffs) Adr = co.namedtuple('Adr', 'display_name addr_spec') Adr.__new__.__defaults__ = ('', ) class Adrs(co.namedtuple('Adrs', 'addresses')): adr_pat = r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+' adr_re = re.compile(r'(?aim)' + adr_pat) @classmethod def has_adr(cls, txt): return bool(cls.adr_re.search(txt)) @classmethod def from_txt(cls, txt): s = co.OrderedDict() txt = txt.replace(';', ',') for c in (', MD', ', Md', ',M.D.', ' M.D.', "'", '"', '*', '&', 'esquire', 'Esquire'): txt = txt.replace(c, ' ') t = txt for c in (',', '<', '[', '(', '>', ']', ')', 'mailto:'): t = t.replace(c, ' ') for w in t.split(): if cls.adr_re.match(w): s[w] = None if s: return cls(tuple(Adr(None, 
a) for a in s.keys())) return (','.join(w for w in txt.split(',') if w.strip()), None) def camelize(txt, upper_first=True): if upper_first: return re.sub(r"(?:^|_)(.)", lambda m: m.group(1).upper(), txt) else: return txt[0].lower() + camelize(txt)[1:] def link_class(label): n = 'lnk_' + label n = camelize(n[:-1] if n.endswith('_') else n) d = dict(label=label, directed=label.endswith('ing')) globals()[n] = t = type(n, (object, ), d) return t for l in ('full', 'partial'): link_class(l) ls = ( 'audience', 'bcc', 'cc', 'date', 'from_', 'including', 'record_id', 'proximity', 'quoting', 'referring', 'replying', 'source', 'subject', 'summary', 'tags', 'title', 'to', 'topic', ) Hdr = co.namedtuple('Hdr', ls) Hdr.links = tuple(link_class(l) for l in Hdr._fields) class Record: label = 'record' Traits = co.namedtuple('Traits', 'role background justify slug') Traits.__new__.__defaults__ = (None, None, 0, None) class Config: EQ = 'eq' LT = 'lt' GT = 'gt' TBD = 'TBD' DEFAULT = 'default' EXCLUDED = 'excluded' ENH = '_enh' HTML = 'html' ATTM = 'attm' PLAIN = 'plain' CHAIN = 'chain' def_from = '' include_adrs = () exclude_specs = exclude_mids = () exclude_doms = exclude_locs = exclude_fulls = () ROOT = 'root' PRIV = 'priv' PROT = 'prot' PUBL = 'publ' OPEN = 'open' subject_aliases = topic_aliases = () def_contacts = contact_aliases = bridge_aliases = { None: (), ROOT: (), PRIV: (), PROT: (), PUBL: (), OPEN: () } SRC = 'src/' DST = 'dst/' CTXT = 'ctxt' DOCS = 'docs' PICS = 'pics' RECS = 'recs' ARCH = '/arch/' REPO = '/repo/' QNAR = 'qnar/' SAFE = '/safe/' BLOG = '/blog/' MAIN = '/main/' MBOX = 'mbox' TBOX = 'tbox' BBOX = 'bbox' # SBOX = 'transcripts' SBOX = 'try' IMGS = 'imgs' ORGS = 'orgs' nominal_offs = book_names = () line_junk = line_replace = fixups = quotes = () alloweds = substitutes = all_traits = {} web_templates = '' # Base RGB FFC0C0, Hue 0, Dist 90, Lightest Pale Pastel gray = 'e8e8e8' green = 'B8F4B8' lgreen = 'E4FDE4' blue = 'B7D0EC' lblue = 'E4EFFB' red = 'FFC0C0' 
lred = 'FFE6E6' yellow = 'FFF4C0' lyellow = 'FFFBE6' right = 8 lright = right - 3 middle = 7 lmiddle = middle - 3 left = 6 lleft = left - 3 @property def recs_src(self): return self.SRC + self.RECS @property def recs_arch(self): return self.SRC + self.RECS + self.ARCH @property def recs_repo(self): return self.SRC + self.RECS + self.REPO @property def main_src(self): return self.SRC + self.DOCS + self.MAIN @property def blog_src(self): return self.SRC + self.DOCS + self.BLOG @property def priv_src(self): return self.SRC + self.DOCS + self.SAFE @property def docs_src(self): return self.SRC + self.DOCS @property def sbox_src(self): return self.SRC + self.DOCS + self.REPO + self.SBOX @property def mbox_src(self): return self.recs_repo + self.MBOX @property def tbox_src(self): return self.recs_repo + self.TBOX @property def bbox_src(self): return self.recs_repo + self.BBOX @property def qnar_dst(self): return self.DST + self.QNAR @property def html_dst(self): return self.DST + self.QNAR + self.HTML @property def attm_dst(self): return self.DST + self.QNAR + self.ATTM config = Config() sys.path.insert(0, str(pth.Path.cwd())) try: import qnarre_settings qnarre_settings.apply_to(config) except ImportError as e: print('Failed to import a qnarre_settings.py', e) sys.path.pop(0) def traits_for(key): ts = config.all_traits.get(str(key), Traits()) if ts.slug: ts = config.all_traits.get(ts.slug, Traits()) return ts
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,408
quantapix/qnarre
refs/heads/main
/qnarre/prep/config/megatron.py
# Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= from ... import core as qc class PreTrained(qc.PreTrained): hs = qc.Hypers( [], dict( act="gelu", d_ff=4096, d_hidden=1024, drop_attn=0.1, drop=0.1, eps=1e-12, init_range=0.02, model_type="megatron-bert", n_heads=16, n_lays=24, n_pos=512, n_typ=2, PAD=0, pos_type="absolute", s_vocab=29056, y_cache=True, ), ) def _init_weights(self, module): if isinstance(module, (qc.Linear, qc.Embed)): module.weight.data.normal_(mean=0.0, std=self.config.init_range) elif isinstance(module, qc.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, qc.Linear) and module.bias is not None: module.bias.data.zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, MegatronBertEncoder): module.gradient_checkpointing = value MAP = {}
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,409
quantapix/qnarre
refs/heads/main
/qnarre/models/flash/llama.py
# Copyright (c) 2023, Tri Dao. import math import json import re from pathlib import Path from collections import OrderedDict import torch import torch.nn.functional as F from transformers import GPT2Config, LlamaConfig def remap_state_dict_meta_llama(state_dict, config): def key_mapping_layers(key): return f'transformer.{key}' if not key.startswith('output.') else key state_dict = OrderedDict((key_mapping_layers(k), v) for k, v in state_dict.items()) # Word embedding def key_mapping_emb(key): return re.sub(r'^transformer.tok_embeddings.', 'transformer.embeddings.word_embeddings.', key) state_dict = OrderedDict((key_mapping_emb(k), v) for k, v in state_dict.items()) word_embeddings = state_dict.pop('transformer.embeddings.word_embeddings.weight') # It's possible that vocab_size is padded to be a multiple of 8, for example. pad_vocab_size_multiple = getattr(config, 'pad_vocab_size_multiple', 1) vocab_size = (math.ceil(word_embeddings.shape[0] / pad_vocab_size_multiple) * pad_vocab_size_multiple) state_dict['transformer.embeddings.word_embeddings.weight'] = F.pad( word_embeddings, (0, 0, 0, vocab_size - word_embeddings.shape[0]) ) if getattr(config, 'tie_word_embeddings'): state_dict['lm_head.weight'] = state_dict['transformer.embeddings.word_embeddings.weight'] else: output_embeddings = state_dict.pop('output.weight') # Need to recompute vocab_size since LLaMa shards the word embeddings and output embeddings # differently. vocab_size = (math.ceil(output_embeddings.shape[0] / pad_vocab_size_multiple) * pad_vocab_size_multiple) # It's possible that vocab_size is padded to be a multiple of 8, for example. 
state_dict['lm_head.weight'] = F.pad( output_embeddings, (0, 0, 0, vocab_size - output_embeddings.shape[0]) ) # LayerNorm def key_mapping_ln(key): key = re.sub(r'^transformer.norm.', r'transformer.ln_f.', key) key = re.sub(r'^transformer.layers.(\d+).attention_norm.', r'transformer.layers.\1.norm1.', key) key = re.sub(r'^transformer.layers.(\d+).ffn_norm.', r'transformer.layers.\1.norm2.', key) return key state_dict = OrderedDict((key_mapping_ln(k), v) for k, v in state_dict.items()) # MLP for l in range(config.n_layer): w1 = state_dict.pop(f'transformer.layers.{l}.feed_forward.w1.weight') w3 = state_dict.pop(f'transformer.layers.{l}.feed_forward.w3.weight') # Our ordering is different state_dict[f'transformer.layers.{l}.mlp.fc1.weight'] = torch.cat([w3, w1], dim=0) def key_mapping_mlp(key): return re.sub(r'^transformer.layers.(\d+).feed_forward.w2.', r'transformer.layers.\1.mlp.fc2.', key) state_dict = OrderedDict((key_mapping_mlp(k), v) for k, v in state_dict.items()) # Attention for l in range(config.n_layer): Wq = state_dict.pop(f'transformer.layers.{l}.attention.wq.weight') Wk = state_dict.pop(f'transformer.layers.{l}.attention.wk.weight') Wv = state_dict.pop(f'transformer.layers.{l}.attention.wv.weight') state_dict[f'transformer.layers.{l}.mixer.Wqkv.weight'] = torch.cat([Wq, Wk, Wv], dim=0) # We don't store these state_dict.pop(f'transformer.layers.{l}.attention.inner_attention.rope.freqs', None) def key_mapping_attn(key): return re.sub(r'^transformer.layers.(\d+).attention.wo.', r'transformer.layers.\1.mixer.out_proj.', key) state_dict = OrderedDict((key_mapping_attn(k), v) for k, v in state_dict.items()) return state_dict def config_from_checkpoint(checkpoint_path: str, model_name: str) -> LlamaConfig: """Load a LlamaConfig from a checkpoint path.""" with open(Path(checkpoint_path) / model_name / 'params.json') as f: params = json.load(f) config = LlamaConfig(hidden_size=params['dim'], intermediate_size=None, num_attention_heads=params['n_heads'], 
num_hidden_layers=params['n_layers'], rms_norm_eps=params['norm_eps']) return config def state_dicts_from_checkpoint(checkpoint_path: str, model_name: str) -> dict: # Need to sort, otherwise we mess up the ordering and the weights are wrong return [torch.load(path, map_location='cpu') for path in sorted((Path(checkpoint_path) / model_name).glob('consolidated.*.pth'))] def llama_config_to_gpt2_config(llama_config: LlamaConfig) -> GPT2Config: return GPT2Config( vocab_size=llama_config.vocab_size, n_positions=0, # No absolute position embedding n_embd=llama_config.hidden_size, n_layer=llama_config.num_hidden_layers, n_head=llama_config.num_attention_heads, n_inner=llama_config.intermediate_size, activation_function='swiglu', # Hardcode since HF calls it 'silu' # Llama doesn't have dropout, idk if it's because they only release the inference code resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=llama_config.rms_norm_eps, initializer_range=llama_config.initializer_range, bos_token_id=llama_config.bos_token_id, eos_token_id=llama_config.eos_token_id, # These are new arguments not in the original GPT2Config pad_token_id=llama_config.pad_token_id, # Idk if this does anything rms_norm=True, rotary_emb_fraction=1.0, rotary_emb_interleaved=True, tie_word_embeddings=False, qkv_proj_bias=False, out_proj_bias=False, mlp_fc1_bias=False, mlp_fc2_bias=False, )
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,410
quantapix/qnarre
refs/heads/main
/tools/triton/python/triton/ops/matmul_perf_model.py
import heapq import torch import triton import triton._C.libtriton.triton as _triton from triton.runtime import driver from triton.testing import get_dram_gbps, get_max_simd_tflops, get_max_tensorcore_tflops def get_tensorcore_tflops(backend, device, num_ctas, num_warps, dtype): ''' return compute throughput in TOPS ''' total_warps = num_ctas * min(num_warps, 4) num_subcores = driver.utils.get_device_properties(device)["multiprocessor_count"] * 4 # on recent GPUs tflops = min(num_subcores, total_warps) / num_subcores * get_max_tensorcore_tflops(dtype, backend, device) return tflops def get_simd_tflops(backend, device, num_ctas, num_warps, dtype): ''' return compute throughput in TOPS ''' total_warps = num_ctas * min(num_warps, 4) num_subcores = driver.utils.get_device_properties(device)["multiprocessor_count"] * 4 # on recent GPUs tflops = min(num_subcores, total_warps) / num_subcores * get_max_simd_tflops(dtype, backend, device) return tflops def get_tflops(backend, device, num_ctas, num_warps, dtype): capability = torch.cuda.get_device_capability(device) if capability[0] < 8 and dtype == torch.float32: return get_simd_tflops(backend, device, num_ctas, num_warps, dtype) return get_tensorcore_tflops(backend, device, num_ctas, num_warps, dtype) def estimate_matmul_time( # backend, device, num_warps, num_stages, A, B, C, M, N, K, BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, debug=False, **kwargs ): ''' return estimated running time in ms = max(compute, loading) + store ''' backend = _triton.runtime.backend.CUDA device = torch.cuda.current_device() dtype = A.dtype dtsize = A.element_size() num_cta_m = triton.cdiv(M, BLOCK_M) num_cta_n = triton.cdiv(N, BLOCK_N) num_cta_k = SPLIT_K num_ctas = num_cta_m * num_cta_n * num_cta_k # If the input is smaller than the block size M, N = max(M, BLOCK_M), max(N, BLOCK_N) # time to compute total_ops = 2 * M * N * K / (1024 * 1024 * 1024) # GOPS tput = get_tflops(backend, device, num_ctas, num_warps, dtype) compute_ms = total_ops / tput # 
time to load data num_sm = driver.utils.get_device_properties(device)["multiprocessor_count"] active_cta_ratio = min(1, num_ctas / num_sm) active_cta_ratio_bw1 = min(1, num_ctas / 32) # 32 active ctas are enough to saturate active_cta_ratio_bw2 = max(min(1, (num_ctas - 32) / (108 - 32)), 0) # 32-108, remaining 5% dram_bw = get_dram_gbps(backend, device) * (active_cta_ratio_bw1 * 0.95 + active_cta_ratio_bw2 * 0.05) # in GB/s l2_bw = dram_bw * 4 # rough estimation (should be 4.7 for A100?) # assume 80% of (following) loads are in L2 cache load_a_dram = M * K * dtsize * (1 + 0.2 * (num_cta_n - 1)) load_a_l2 = M * K * dtsize * 0.8 * (num_cta_n - 1) load_b_dram = N * K * dtsize * (1 + 0.2 * (num_cta_m - 1)) load_b_l2 = N * K * dtsize * 0.8 * (num_cta_m - 1) # total total_dram = (load_a_dram + load_b_dram) / (1024 * 1024) # MB total_l2 = (load_a_l2 + load_b_l2) / (1024 * 1024) # loading time in ms load_ms = total_dram / dram_bw + total_l2 / l2_bw # estimate storing time store_bw = dram_bw * 0.6 # :o store_c_dram = M * N * dtsize * SPLIT_K / (1024 * 1024) # MB if SPLIT_K == 1: store_ms = store_c_dram / store_bw else: reduce_bw = store_bw store_ms = store_c_dram / reduce_bw # c.zero_() zero_ms = M * N * 2 / (1024 * 1024) / store_bw store_ms += zero_ms total_time_ms = max(compute_ms, load_ms) + store_ms if debug: print(f'Total time: {total_time_ms}ms, compute time: {compute_ms}ms, ' f'loading time: {load_ms}ms, store time: {store_ms}ms, ' f'Activate CTAs: {active_cta_ratio*100}%') return total_time_ms def early_config_prune(configs, named_args): device = torch.cuda.current_device() capability = torch.cuda.get_device_capability() # BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages dtsize = named_args['A'].element_size() dtype = named_args['A'].dtype # 1. 
make sure we have enough smem pruned_configs = [] for config in configs: kw = config.kwargs BLOCK_M, BLOCK_N, BLOCK_K, num_stages = \ kw['BLOCK_M'], kw['BLOCK_N'], kw['BLOCK_K'], config.num_stages max_shared_memory = driver.utils.get_device_properties(device)["max_shared_mem"] required_shared_memory = (BLOCK_M + BLOCK_N) * BLOCK_K * num_stages * dtsize if required_shared_memory <= max_shared_memory: pruned_configs.append(config) configs = pruned_configs # Some dtypes do not allow atomic_add if dtype not in [torch.float16, torch.float32]: configs = [config for config in configs if config.kwargs['SPLIT_K'] == 1] # group configs by (BLOCK_M,_N,_K, SPLIT_K, num_warps) configs_map = {} for config in configs: kw = config.kwargs BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages = \ kw['BLOCK_M'], kw['BLOCK_N'], kw['BLOCK_K'], kw['SPLIT_K'], config.num_warps, config.num_stages key = (BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps) if key in configs_map: configs_map[key].append((config, num_stages)) else: configs_map[key] = [(config, num_stages)] pruned_configs = [] for k, v in configs_map.items(): BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps = k if capability[0] >= 8: # compute cycles (only works for ampere GPUs) mmas = BLOCK_M * BLOCK_N * BLOCK_K / (16 * 8 * 16) mma_cycles = mmas / min(4, num_warps) * 8 ldgsts_latency = 300 # Does this matter? optimal_num_stages = ldgsts_latency / mma_cycles # nearest stages, prefer large #stages nearest = heapq.nsmallest(2, v, key=lambda x: 10 + abs(x[1] - optimal_num_stages) if (x[1] - optimal_num_stages) < 0 else x[1] - optimal_num_stages) for n in nearest: pruned_configs.append(n[0]) else: # Volta & Turing only supports num_stages <= 2 random_config = v[0][0] random_config.num_stages = 2 pruned_configs.append(random_config) return pruned_configs
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,411
quantapix/qnarre
refs/heads/main
/qnarre/models/prophetnet.py
# Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import copy import math import torch import torch.utils.checkpoint from torch import nn from torch.nn import functional as F from transformers.utils import logging from .. import core as qc from ..core import utils as qu from ..core import output as qo from ..core import attention as qa from ..core.embed import Embed from ..core.mlp import Classifier, MLP, Predictor, Pool from ..prep.config.bert import PreTrained from dataclasses import dataclass from ...modeling_utils import PreTrained log = logging.get_logger(__name__) LIST = [ "microsoft/prophetnet-large-uncased", ] def softmax(hidden_state, dim, onnx_trace=False): if onnx_trace: return F.softmax(hidden_state.float(), dim=dim) else: return F.softmax(hidden_state, dim=dim, dtype=torch.float32) def ngram_attention_bias(sequence_length, ngram, device, dtype): left_block = torch.ones( (ngram, sequence_length, sequence_length), device=device, dtype=dtype ) * float("-inf") right_block = left_block.detach().clone() # create bias for stream_idx in range(ngram): right_block[stream_idx].fill_diagonal_(0, wrap=False) left_block[stream_idx].triu_(-stream_idx + 1) left_block[:, :, 0] = 0 return torch.cat([left_block, right_block], dim=2) def compute_relative_buckets(num_buckets, max_distance, relative_positions, is_bidirectional=False): inv_relative_positions 
= -relative_positions rel_positions_bucket = 0 if is_bidirectional: num_buckets = num_buckets // 2 rel_positions_bucket = ( rel_positions_bucket + torch.lt(inv_relative_positions, torch.zeros_like(inv_relative_positions)).int() * num_buckets ) inv_relative_positions = torch.abs(inv_relative_positions) else: inv_relative_positions = torch.max( inv_relative_positions, torch.zeros_like(inv_relative_positions) ) max_exact = num_buckets // 2 is_small = torch.lt(inv_relative_positions, max_exact) val_if_large = max_exact + torch.log(inv_relative_positions.float() / max_exact) / math.log( max_distance / max_exact ) * (num_buckets - max_exact) val_if_large = torch.min(val_if_large, torch.ones_like(val_if_large) * (num_buckets - 1)).int() rel_positions_bucket = rel_positions_bucket + torch.where( is_small, inv_relative_positions.int(), val_if_large ) return rel_positions_bucket def compute_all_stream_relative_buckets(num_buckets, max_distance, position_ids): # main stream main_stream_relative_positions = position_ids.unsqueeze(1).repeat(1, position_ids.size(-1), 1) main_stream_relative_positions = main_stream_relative_positions - position_ids.unsqueeze(-1) # predicting stream predicting_stream_relative_positions = torch.cat( (position_ids - 1, position_ids), dim=-1 ).unsqueeze(1) predicting_stream_relative_positions = predicting_stream_relative_positions.repeat( 1, position_ids.size(-1), 1 ) predicting_stream_relative_positions = ( predicting_stream_relative_positions - position_ids.unsqueeze(-1) ) # get both position buckets main_relative_position_buckets = compute_relative_buckets( num_buckets, max_distance, main_stream_relative_positions, is_bidirectional=False ) predict_relative_position_buckets = compute_relative_buckets( num_buckets, max_distance, predicting_stream_relative_positions, is_bidirectional=False ) return main_relative_position_buckets, predict_relative_position_buckets @dataclass class ProphetNetSeq2SeqLMOutput(ModelOutput): loss = None logits = None 
logits_ngram = None caches = None hiddens = None decoder_ngram_hidden_states = None attns = None decoder_ngram_attentions = None crosses = None enc_y = None enc_hiddens = None enc_attns = None @dataclass class ProphetNetSeq2SeqModelOutput(ModelOutput): y last_hidden_state_ngram = None caches = None hiddens = None decoder_ngram_hidden_states = None attns = None decoder_ngram_attentions = None crosses = None enc_y = None enc_hiddens = None enc_attns = None @dataclass class ProphetNetDecoderModelOutput(ModelOutput): y last_hidden_state_ngram = None caches = None hiddens = None hidden_states_ngram = None attns = None ngram_attentions = None crosses = None @dataclass class ProphetNetDecoderLMOutput(ModelOutput): loss = None logits = None logits_ngram = None caches = None hiddens = None hidden_states_ngram = None attns = None ngram_attentions = None crosses = None class ProphetNetPositionalEmbeddings(qc.Embed): def __init__(self, config): self.max_length = config.n_pos super().__init__(config.n_pos, config.d_model, config.PAD) def forward(self, inputs_shape, device, attention_mask=None, caches=None, position_ids=None): assert (position_ids is None) or (self.padding_idx is None) if position_ids is None: if caches is not None: prev_num_input_ids = caches[0][0].shape[2] num_input_ids = inputs_shape[1] + prev_num_input_ids position_ids = torch.ones((1, 1), dtype=torch.long, device=device) * ( int(self.padding_idx + num_input_ids) ) else: if attention_mask is None: attention_mask = torch.ones(inputs_shape, dtype=torch.long, device=device) position_ids = ( torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask ).long() + self.padding_idx position_ids = position_ids.clamp(0, self.max_length - 1) return super().forward(position_ids), position_ids def _forward(self, position_ids): return super().forward(position_ids) class Attention(qc.Module): def __init__( self, config, num_attn_heads, ): super().__init__() d_model = config.d_model self.drop_attn = 
config.drop_attn self.drop = config.drop self.num_attn_heads = num_attn_heads self.head_dim = d_model // num_attn_heads assert self.head_dim * num_attn_heads == d_model self.key_proj = qc.Linear(d_model, d_model) self.value_proj = qc.Linear(d_model, d_model) self.query_proj = qc.Linear(d_model, d_model) self.out_proj = qc.Linear(d_model, d_model) def _shape(self, tensor, seq_len, bsz): return ( tensor.view(bsz, seq_len, self.num_attn_heads, self.head_dim) .transpose(1, 2) .contiguous() ) def forward( self, hiddens, key_value_states=None, attention_mask=None, layer_head_mask=None, past_key_value=None, output_attentions=False, ): batch_size, tgt_len, d_model = hiddens.size() is_cross_attention = key_value_states is not None assert list(hiddens.size()) == [ batch_size, tgt_len, d_model, ] query_states = self.query_proj(hiddens) / (self.head_dim**0.5) if is_cross_attention and past_key_value is not None: # reuse k,v, crosses key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # crosses key_states = self._shape(self.key_proj(key_value_states), -1, batch_size) value_states = self._shape(self.value_proj(key_value_states), -1, batch_size) else: # self_attention key_states = self._shape(self.key_proj(hiddens), -1, batch_size) value_states = self._shape(self.value_proj(hiddens), -1, batch_size) if is_cross_attention: past_key_value = (key_states, value_states) proj_shape = (batch_size * self.num_attn_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) assert attn_weights.size() == ( batch_size * self.num_attn_heads, tgt_len, src_len, ) if attention_mask is not None and attention_mask.dim() == 0: attention_mask = None assert attention_mask is None or attention_mask.size() == ( self.num_attn_heads * 
batch_size, 1, src_len, ) if attention_mask is not None: # don't attend to padding symbols attn_weights = attn_weights + attention_mask if output_attentions: attn_weights_reshaped = attn_weights.view( batch_size, self.num_attn_heads, tgt_len, src_len ) attn_weights = attn_weights_reshaped.view( batch_size * self.num_attn_heads, tgt_len, src_len ) else: attn_weights_reshaped = None attn_weights = F.softmax(attn_weights, dim=-1) if layer_head_mask is not None: assert layer_head_mask.size() == (self.num_attn_heads,) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view( batch_size, self.num_attn_heads, tgt_len, src_len ) attn_weights = attn_weights.view(batch_size * self.num_attn_heads, tgt_len, src_len) attn_weights_reshaped = layer_head_mask.view(1, -1, 1, 1) * attn_weights_reshaped attn_probs = F.drop( attn_weights, p=self.drop_attn, training=self.training, ) attn_output = torch.bmm(attn_probs, value_states) assert attn_output.size() == ( batch_size * self.num_attn_heads, tgt_len, self.head_dim, ) attn_output = ( attn_output.view(batch_size, self.num_attn_heads, tgt_len, self.head_dim) .transpose(1, 2) .reshape(batch_size, tgt_len, d_model) ) attn_output = self.out_proj(attn_output) attn_output = F.drop(attn_output, p=self.drop, training=self.training) return attn_output, attn_weights_reshaped, past_key_value class ProphetNetFeedForward(qc.Module): def __init__(self, config, ffn_dim): super().__init__() self.act = qu.activation(config.act) self.intermediate = qc.Linear(config.d_model, ffn_dim) self.output = qc.Linear(ffn_dim, config.d_model) self.drop_act = config.drop_act self.drop = config.drop def forward(self, x): y = self.intermediate(x) y = self.act(y) y = F.drop(y, p=self.drop_act, training=self.training) y = self.output(y) y = F.drop(y, p=self.drop, training=self.training) return y class ProphetNetNgramSelfAttention(qc.Module): def __init__(self, config): super().__init__() self.d_model = config.d_model self.num_buckets = config.num_buckets 
self.relative_max_distance = config.relative_max_distance self.num_attn_heads = config.num_decoder_attention_heads self.drop = config.drop self.drop_attn = config.drop_attn self.head_dim = config.d_model // self.num_attn_heads self.ngram = config.ngram assert self.head_dim * self.num_attn_heads == config.d_model self.key_proj = qc.Linear(config.d_model, config.d_model) self.value_proj = qc.Linear(config.d_model, config.d_model) self.query_proj = qc.Linear(config.d_model, config.d_model) self.out_proj = qc.Linear(config.d_model, config.d_model) self.relative_pos_embeddings = qc.Linear( config.d_model, self.num_buckets * self.num_attn_heads ) self.onnx_trace = False def _shape(self, tensor, seq_len, batch_size): return ( tensor.view(batch_size, seq_len, self.num_attn_heads, self.head_dim) .transpose(1, 2) .contiguous() ) def forward( self, hiddens, past_key_value=None, attention_mask=None, layer_head_mask=None, extended_predict_attention_mask=None, main_relative_position_buckets=None, predict_relative_position_buckets=None, position_ids=None, ): batch_size, ngram_sequence_length, d_model = hiddens.size() assert list(hiddens.size()) == [ batch_size, ngram_sequence_length, d_model, ] # project query_states = self.query_proj(hiddens) key_states = self.key_proj(hiddens) value_states = self.value_proj(hiddens) # normalize query_states = query_states / (self.head_dim**0.5) # reshape query_states = self._shape(query_states, ngram_sequence_length, batch_size) key_states = self._shape(key_states, -1, batch_size) value_states = self._shape(value_states, -1, batch_size) proj_shape = (batch_size * self.num_attn_heads, -1, self.head_dim) query_states = query_states.view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) # chunk into main stream and predict stream hidden_states_list = hiddens.chunk(1 + self.ngram, dim=1) query_states_list = query_states.chunk(1 + self.ngram, dim=1) key_states_list = key_states.chunk(1 + self.ngram, 
dim=1) value_states_list = value_states.chunk(1 + self.ngram, dim=1) main_hidden_states, hidden_states_predict_list = ( hidden_states_list[0], hidden_states_list[1:], ) main_query_states, predict_query_states_list = query_states_list[0], query_states_list[1:] main_key_states, predict_key_states_list = key_states_list[0], key_states_list[1:] main_value_states, predict_value_states_list = value_states_list[0], value_states_list[1:] # saved states are stored with shape (batch_size, num_attn_heads, seq_len, head_dim) if past_key_value is not None: prev_main_key_states = past_key_value[0].view( batch_size * self.num_attn_heads, -1, self.head_dim ) main_key_states = torch.cat((prev_main_key_states, main_key_states), dim=1) prev_main_value_states = past_key_value[1].view( batch_size * self.num_attn_heads, -1, self.head_dim ) main_value_states = torch.cat((prev_main_value_states, main_value_states), dim=1) # Update cache past_key_value = ( main_key_states.view(batch_size, self.num_attn_heads, -1, self.head_dim), main_value_states.view(batch_size, self.num_attn_heads, -1, self.head_dim), ) # get seq_length of main stream only sequence_length = ngram_sequence_length // (1 + self.ngram) # MAIN-STREAM # main attn weights main_attn_weights = torch.bmm(main_query_states, main_key_states.transpose(1, 2)) main_relative_pos_embeddings = self.get_main_relative_pos_embeddings( main_hidden_states, main_attn_weights, position_ids, main_relative_position_buckets ) main_attn_weights = main_attn_weights + main_relative_pos_embeddings if attention_mask is not None: main_attn_weights = main_attn_weights + attention_mask main_attn_probs = softmax( main_attn_weights, dim=-1, onnx_trace=self.onnx_trace, ).type_as(main_attn_weights) if layer_head_mask is not None: assert layer_head_mask.size() == (self.num_attn_heads,) main_attn_probs = layer_head_mask.view(1, -1, 1, 1) * main_attn_probs.view( batch_size, self.num_attn_heads, -1, sequence_length ) main_attn_probs = main_attn_probs.view( 
batch_size * self.num_attn_heads, -1, sequence_length ) main_attn_probs = F.drop(main_attn_probs, p=self.drop_attn, training=self.training) # project to attn_output main_attn_output = torch.bmm(main_attn_probs, main_value_states) # reshape so that n_heads dim is merged into last `head_dim` axis main_attn_output = ( main_attn_output.view(batch_size, self.num_attn_heads, sequence_length, self.head_dim) .transpose(1, 2) .reshape(batch_size, 1, sequence_length, d_model) ) main_attn_output = self.out_proj(main_attn_output) # PREDICT-STREAM # [ngram, B*head, T, c] predict_query_states = torch.cat(predict_query_states_list, 0).view( self.ngram, -1, sequence_length, self.head_dim ) # [ngram, B*head, 2*T, c] predict_key_states = torch.cat( [torch.cat([main_key_states, key], 1).unsqueeze(0) for key in predict_key_states_list], 0, ) # [ngram, T, B, C] predict_hidden_states = torch.cat(hidden_states_predict_list, 0).view( self.ngram, sequence_length, batch_size, d_model ) # [ngram, B*head, 2*T, c] predict_value_states = torch.cat( [ torch.cat([main_value_states, v_p], 1).unsqueeze(0) for v_p in predict_value_states_list ], 0, ) # [ngram, B*head, T, 2*T] predict_attn_weights = torch.einsum( "nbtc,nbsc->nbts", (predict_query_states, predict_key_states) ) predict_relative_pos_embeddings = self.get_predict_relative_pos_embeddings( predict_hidden_states, predict_attn_weights, position_ids, predict_relative_position_buckets, ) # [ngram, B*head, T, 2*T] predict_attn_weights = predict_attn_weights + predict_relative_pos_embeddings if extended_predict_attention_mask is not None: predict_attn_weights = predict_attn_weights + extended_predict_attention_mask.to( predict_attn_weights.dtype ) predict_attn_probs = softmax( predict_attn_weights, dim=-1, onnx_trace=self.onnx_trace, ).type_as(predict_attn_weights) if layer_head_mask is not None: assert layer_head_mask.size() == (self.num_attn_heads,) predict_attn_probs = layer_head_mask.view(1, 1, -1, 1, 1) * predict_attn_probs.view( 
self.ngram, batch_size, self.num_attn_heads, sequence_length, 2 * sequence_length ) predict_attn_probs = predict_attn_probs.view( self.ngram, batch_size * self.num_attn_heads, sequence_length, 2 * sequence_length ) predict_attn_probs = F.drop(predict_attn_probs, p=self.drop_attn, training=self.training) # project to attention output # [ngram, B*head, T, c] predict_attn_output = torch.einsum( "nbts,nbsc->nbtc", (predict_attn_probs, predict_value_states) ) # reshape so that n_heads dim is merged into last `head_dim` axis # [ngram, B, T, C] predict_attn_output = ( predict_attn_output.view( self.ngram, batch_size, self.num_attn_heads, sequence_length, self.head_dim ) .permute(1, 0, 3, 2, 4) .reshape(batch_size, self.ngram, sequence_length, d_model) ) predict_attn_output = self.out_proj(predict_attn_output) # concat to single attn output # [B, 1+ngram*T, C] attn_output = torch.cat([main_attn_output, predict_attn_output], 1).view( batch_size, -1, d_model ) # reshape into better form for `config.output_attentions` main_attn_probs = main_attn_probs.view(batch_size, self.num_attn_heads, sequence_length, -1) predict_attn_probs = predict_attn_probs.view( self.ngram, batch_size, self.num_attn_heads, sequence_length, -1 ).transpose(0, 1) attn_output = F.drop(attn_output, p=self.drop, training=self.training) return attn_output, main_attn_probs, predict_attn_probs, past_key_value def get_main_relative_pos_embeddings( self, hiddens, attn_weights, position_ids, main_relative_position_buckets ): # input hiddens [B,T,C], input attn_weights [T*head,T,S], input position_ids [B,T] or [1,1] if main_relative_position_buckets is None: batch_size, sequence_length = hiddens.shape[:2] relative_positions = ( torch.arange(1, attn_weights.shape[-1] + 1) .unsqueeze(0) .unsqueeze(0) .repeat(batch_size, sequence_length, 1) .to(position_ids.device) ) relative_positions = relative_positions - position_ids.unsqueeze(0).repeat( batch_size, sequence_length, 1 ) # [B, T, s] main_relative_position_buckets 
= compute_relative_buckets( self.num_buckets, self.relative_max_distance, relative_positions, False ) rel_pos_embeddings = self.relative_pos_embeddings(hiddens) # [B,T,Buckets*head] rel_pos_embeddings = rel_pos_embeddings.view( rel_pos_embeddings.shape[:2] + (self.num_buckets, self.num_attn_heads) ).permute( 0, 3, 1, 2 ) # [B,T,Buckets,head] rel_pos_embeddings = rel_pos_embeddings.reshape( attn_weights.shape[:2] + (-1,) ) # [B*head,T,Buckets] main_relative_position_buckets = ( main_relative_position_buckets.repeat(1, self.num_attn_heads, 1) .view(-1, main_relative_position_buckets.shape[-1]) .long() ) # [B*head*T, T] rel_pos_embeddings = rel_pos_embeddings.reshape( -1, rel_pos_embeddings.size(-1) ) # [B*head*T,Buckets] main_relative_pos_embeddings = torch.gather( rel_pos_embeddings, dim=1, index=main_relative_position_buckets ).view(attn_weights.shape[:2] + (-1,)) return main_relative_pos_embeddings def get_predict_relative_pos_embeddings( self, hiddens, attn_weights, position_ids, predict_relative_position_buckets ): # input hiddens [ngram, T,B,C], input attn_weights [ngram, B*head,T,S], input position_ids [B,T] or [1,1], input predict_relative_position_buckets [B,T, 2*T] or None sequence_length, batch_size = hiddens.shape[1:3] if predict_relative_position_buckets is None: key_sequence_length = attn_weights.shape[-1] assert ( position_ids[0][0] == key_sequence_length - 1 ), "`position_ids` are incorrect. They should be of the format 1 2 3 4 5 ... 
(key_sequence_length - 1)" relative_positions = ( torch.arange(0, key_sequence_length) .unsqueeze(0) .unsqueeze(0) .repeat(batch_size, sequence_length, 1) .to(position_ids.device) ) relative_positions = relative_positions - position_ids.unsqueeze(0).repeat( batch_size, sequence_length, 1 ) predict_relative_position_buckets = compute_relative_buckets( self.num_buckets, self.relative_max_distance, relative_positions, False ) hiddens = hiddens.transpose(1, 2) # [ngram, B, T, C] rel_pos_embeddings = self.relative_pos_embeddings(hiddens).view( hiddens.shape[:-1] + (self.num_buckets, self.num_attn_heads) ) # [ngram, B, T, bucket, head] rel_pos_embeddings = rel_pos_embeddings.permute(0, 1, 4, 2, 3).reshape( self.ngram * batch_size * self.num_attn_heads, sequence_length, -1 ) # [ngram*B*head, T, bucket] predict_relative_position_buckets = predict_relative_position_buckets.unsqueeze(0).repeat( self.ngram, 1, self.num_attn_heads, 1 ) # [ngram, B, head*T, S] rel_pos_embeddings = rel_pos_embeddings.reshape(-1, rel_pos_embeddings.size(-1)) predict_relative_position_buckets = predict_relative_position_buckets.view( -1, predict_relative_position_buckets.size(-1) ).long() # [ngram*B*head*T, S] predict_relative_pos_embeddings = torch.gather( rel_pos_embeddings, dim=1, index=predict_relative_position_buckets ).view( self.ngram, batch_size * self.num_attn_heads, sequence_length, -1 ) # [ngram, B*head, T, S] return predict_relative_pos_embeddings class EncLayer(qc.Module): def __init__(self, config): super().__init__() # 1st residual block self.self_attn = Attention(config, config.num_encoder_attention_heads) self.self_attn_layer_norm = LayerNorm(config.d_model) # 2nd residual block self.feed_forward = ProphetNetFeedForward(config, config.encoder_ffn_dim) self.feed_forward_layer_norm = LayerNorm(config.d_model) def forward( self, hiddens, attention_mask, layer_head_mask, output_attentions=False, ): # 1st residual block attention_output, attn_weights, _ = self.self_attn( 
hiddens=hiddens, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hiddens = self.self_attn_layer_norm(attention_output + hiddens) # 2nd residual block feed_forward_output = self.feed_forward(hiddens) hiddens = self.feed_forward_layer_norm(feed_forward_output + hiddens) outputs = (hiddens,) if output_attentions: outputs += (attn_weights,) return outputs class DecLayer(qc.Module): def __init__(self, config): super().__init__() # 1st residual block self.self_attn = ProphetNetNgramSelfAttention(config) self.self_attn_layer_norm = LayerNorm(config.d_model) # 2nd residual block if config.add_cross_attention: self.cross_attn = Attention(config, config.num_decoder_attention_heads) self.cross_attn_layer_norm = LayerNorm(config.d_model) # 3rd residual block self.feed_forward = ProphetNetFeedForward(config, config.decoder_ffn_dim) self.feed_forward_layer_norm = LayerNorm(config.d_model) def forward( self, hiddens, attention_mask=None, enc_hiddens=None, encoder_attn_mask=None, layer_head_mask=None, cross_attn_layer_head_mask=None, extended_predict_attention_mask=None, main_relative_position_buckets=None, predict_relative_position_buckets=None, position_ids=None, past_key_value=None, y_cache=True, output_attentions=False, ): # 1st residual block # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None ( ngram_attention_output, self_attn_weights, self_attn_weights_ngram, present_key_value, ) = self.self_attn( hiddens=hiddens, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, extended_predict_attention_mask=extended_predict_attention_mask, main_relative_position_buckets=main_relative_position_buckets, predict_relative_position_buckets=predict_relative_position_buckets, position_ids=position_ids, ) hiddens = self.self_attn_layer_norm(hiddens + 
ngram_attention_output) # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attn_weights = None if enc_hiddens is not None: # 2nd residual block attention_output, cross_attn_weights, cross_attn_present_key_value = self.cross_attn( hiddens=hiddens, key_value_states=enc_hiddens, attention_mask=encoder_attn_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hiddens = self.cross_attn_layer_norm(attention_output + hiddens) # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # 3rd residual block feed_forward_output = self.feed_forward(hiddens) hiddens = self.feed_forward_layer_norm(feed_forward_output + hiddens) outputs = (hiddens,) if output_attentions: outputs += (self_attn_weights, self_attn_weights_ngram, cross_attn_weights) if y_cache: outputs += (present_key_value,) return outputs class Encoder(PreTrained): def __init__(self, config, word_embeddings: qc.Embed = None): super().__init__(config) self.word_embeddings = ( word_embeddings if word_embeddings is not None else qc.Embed(config.s_vocab, config.d_model, padding_idx=config.PAD) ) self.position_embeddings = ProphetNetPositionalEmbeddings(config) self.embeddings_layer_norm = LayerNorm(config.d_model) self.layers = nn.ModuleList([EncLayer(config) for _ in range(config.num_encoder_layers)]) self.gradient_checkpointing = False def forward( self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = 
return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None and inputs_embeds is None: raise ValueError("Either input_ids or inputs_embeds has to be passed.") elif input_ids is not None and inputs_embeds is not None: raise ValueError("Make sure to only pass input_ids or inputs_embeds.") elif input_ids is not None and inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) # prepare attention mask if attention_mask is not None: extended_attention_mask = ( 1.0 - attention_mask[:, None, :].repeat(self.config.num_encoder_attention_heads, 1, 1) ) * -10000.0 extended_attention_mask = extended_attention_mask.to(inputs_embeds.dtype) else: extended_attention_mask = None position_embeddings, position_ids = self.position_embeddings( inputs_embeds.shape[:2], inputs_embeds.device ) hiddens = inputs_embeds + position_embeddings hiddens = self.embeddings_layer_norm(hiddens) hiddens = F.drop(hiddens, p=self.config.drop, training=self.training) enc_hiddens = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: assert head_mask.size()[0] == ( len(self.layers) ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." 
for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: enc_hiddens = enc_hiddens + (hiddens,) if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(encoder_layer), hiddens, extended_attention_mask, (head_mask[idx] if head_mask is not None else None), ) else: layer_outputs = encoder_layer( hiddens, attention_mask=extended_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) hiddens = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: enc_hiddens = enc_hiddens + (hiddens,) if not return_dict: return tuple(v for v in [hiddens, enc_hiddens, all_attentions] if v is not None) return qo.Base( y=hiddens, hiddens=enc_hiddens, attns=all_attentions, ) class Decoder(PreTrained): def __init__(self, config, word_embeddings: qc.Embed = None): super().__init__(config) self.ngram = config.ngram self.num_buckets = config.num_buckets self.relative_max_distance = config.relative_max_distance self.drop = config.drop self.max_target_positions = config.n_pos self.word_embeddings = ( word_embeddings if word_embeddings is not None else qc.Embed(config.s_vocab, config.d_model, padding_idx=config.PAD) ) self.position_embeddings = ProphetNetPositionalEmbeddings(config) self.ngram_embeddings = qc.Embed(self.ngram, config.d_model, None) self.layers = nn.ModuleList([DecLayer(config) for _ in range(config.n_dec_lays)]) self.embeddings_layer_norm = LayerNorm(config.d_model) self.gradient_checkpointing = False def forward( self, input_ids=None, attention_mask=None, enc_hiddens=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, caches=None, inputs_embeds=None, y_cache=None, output_attentions=None, output_hidden_states=None, 
        # NOTE(review): interior of Decoder.forward — the signature opens above this chunk.
        return_dict=None,
    ):
        # Resolve per-call flags against the config defaults.
        y_cache = y_cache if y_cache is not None else self.config.y_cache
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Exactly one of input_ids / inputs_embeds must be supplied.
        if input_ids is None and inputs_embeds is None:
            raise ValueError(
                "Either `decoder_input_ids` or `decoder_inputs_embeds` has to be passed."
            )
        elif input_ids is not None and inputs_embeds is not None:
            raise ValueError(
                "Make sure to only pass `decoder_input_ids` or `decoder_inputs_embeds`."
            )
        elif input_ids is not None and inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        batch_size, sequence_length = inputs_embeds.shape[:2]
        # Main-stream positional embeddings; position_ids account for the cache offset.
        main_stream_pos_embed, position_ids = self.position_embeddings(
            (batch_size, sequence_length),
            device=inputs_embeds.device,
            caches=caches,
        )
        # Relative buckets are only needed when decoding the full sequence (no cache).
        if caches is not None:
            main_relative_position_buckets, predict_relative_position_buckets = None, None
        else:
            (
                main_relative_position_buckets,
                predict_relative_position_buckets,
            ) = self.compute_buffered_relative_buckets(position_ids)
        # Predicting stream looks one position ahead.
        predicting_stream_pos_embed = self.position_embeddings._forward(position_ids + 1)
        # add position embeddings
        hiddens = inputs_embeds + main_stream_pos_embed
        ngram_embeddings = self.ngram_embeddings.weight
        # prepare attention mask
        if caches is not None:
            # Incremental decoding: only a single new token is supported per step.
            assert (
                hiddens.size(1) == 1
            ), "At the moment `y_cache` is only supported for `decoder_input_ids` of length 1"
            # Broadcast each ngram stream over the batch; masks are handled by the cache path.
            ngram_hidden_states = [
                (ngram_embeddings[ngram - 1] + predicting_stream_pos_embed).repeat(batch_size, 1, 1)
                for ngram in range(self.ngram)
            ]
            extended_attention_mask = None
            extended_predict_attention_mask = None
        else:
            ngram_hidden_states = [
                (ngram_embeddings[ngram - 1] + predicting_stream_pos_embed)
                for ngram in range(self.ngram)
            ]
            extended_attention_mask = self.prepare_attention_mask(hiddens, attention_mask)
            extended_predict_attention_mask = self.prepare_predict_attention_mask(
                hiddens, attention_mask
            )
        # prepare encoder attention mask: additive mask (-10000 on padded positions),
        # repeated per decoder attention head.
        if encoder_attention_mask is not None:
            extended_encoder_attention_mask = (
                1.0
                - encoder_attention_mask[:, None, :].repeat(
                    self.config.num_decoder_attention_heads, 1, 1
                )
            ) * -10000.0
            extended_encoder_attention_mask = extended_encoder_attention_mask.to(
                inputs_embeds.dtype
            )
        else:
            extended_encoder_attention_mask = None
        # Concatenate main stream and all ngram streams along the sequence axis.
        hiddens = torch.cat([hiddens] + ngram_hidden_states, 1)
        if self.embeddings_layer_norm:
            hiddens = self.embeddings_layer_norm(hiddens)
        # NOTE(review): upstream ProphetNet uses F.dropout(..., p=self.dropout, ...);
        # confirm that `F.drop` / `self.drop` exist under those names in this fork.
        hiddens = F.drop(hiddens, p=self.drop, training=self.training)
        # init attns, hiddens and cache with empty tuples
        all_main_stream_hidden_states = () if output_hidden_states else None
        all_ngram_stream_hidden_states = (
            () if output_hidden_states and self.config.ngram > 0 else None
        )
        all_main_stream_attns = () if output_attentions else None
        all_ngram_stream_attns = () if output_attentions else None
        all_cross_attns = () if output_attentions and self.config.add_cross_attention else None
        present_key_values = () if y_cache else None
        # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
        for attn_mask, mask_name in zip(
            [head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]
        ):
            if attn_mask is not None:
                assert attn_mask.size()[0] == (len(self.layers))
        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                # grad cannot be kept because tensor is sliced
                all_main_stream_hidden_states += (hiddens[:, :sequence_length],)
                if self.config.ngram > 0:
                    all_ngram_stream_hidden_states += (hiddens[:, sequence_length:],)
            past_key_value = caches[idx] if caches is not None else None
            if self.gradient_checkpointing and self.training:
                if y_cache:
                    log.warning(
                        "`y_cache=True` is incompatible with gradient checkpointing. Setting `y_cache=False`..."
                    )
                    y_cache = False

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, y_cache, output_attentions)

                    return custom_forward

                # Checkpointed call: positional args must mirror the decoder layer signature.
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hiddens,
                    extended_attention_mask,
                    enc_hiddens,
                    extended_encoder_attention_mask,
                    (head_mask[idx] if head_mask is not None else None),
                    (cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None),
                    extended_predict_attention_mask,
                    main_relative_position_buckets,
                    predict_relative_position_buckets,
                    position_ids,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hiddens,
                    attention_mask=extended_attention_mask,
                    enc_hiddens=enc_hiddens,
                    encoder_attn_mask=extended_encoder_attention_mask,
                    layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                    cross_attn_layer_head_mask=(
                        cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
                    ),
                    extended_predict_attention_mask=extended_predict_attention_mask,
                    main_relative_position_buckets=main_relative_position_buckets,
                    predict_relative_position_buckets=predict_relative_position_buckets,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    y_cache=y_cache,
                    output_attentions=output_attentions,
                )
            hiddens = layer_outputs[0]
            if y_cache:
                # Cache sits at index 4 when attentions are also returned, else index 1.
                present_key_values += (layer_outputs[4 if output_attentions else 1],)
            if output_attentions:
                all_main_stream_attns += (layer_outputs[1],)
                all_ngram_stream_attns += (layer_outputs[2],)
                if self.config.add_cross_attention:
                    all_cross_attns += (layer_outputs[3],)
        if output_hidden_states:
            all_main_stream_hidden_states += (hiddens[:, :sequence_length],)
            if self.config.ngram > 0:
                all_ngram_stream_hidden_states += (hiddens[:, sequence_length:],)
        # split y for return: first sequence_length positions are the main stream,
        # the remainder are the concatenated ngram streams.
        y = hiddens[:, :sequence_length]
        last_hidden_state_ngram = hiddens[:, sequence_length:] if self.config.ngram > 0 else None
        if not return_dict:
            # Tuple return drops all None entries.
            return tuple(
                v
                for v in [
                    y,
                    last_hidden_state_ngram,
                    present_key_values,
                    all_main_stream_hidden_states,
                    all_ngram_stream_hidden_states,
                    all_main_stream_attns,
                    all_ngram_stream_attns,
                    all_cross_attns,
                ]
                if v is not None
            )
        return ProphetNetDecoderModelOutput(
            y=y,
            last_hidden_state_ngram=last_hidden_state_ngram,
            caches=present_key_values,
            hiddens=all_main_stream_hidden_states,
            hidden_states_ngram=all_ngram_stream_hidden_states,
            attns=all_main_stream_attns,
            ngram_attentions=all_ngram_stream_attns,
            crosses=all_cross_attns,
        )

    def compute_buffered_relative_buckets(self, position_ids):
        """Precompute relative-position buckets for main and predict streams.

        Buckets are computed once over the full `max_target_positions` range and
        then sliced ("buffered") down to the current sequence length.
        Returns `(main_relative_buckets, predict_relative_buckets)`, each repeated
        over the batch dimension.
        """
        batch_size, sequence_length = position_ids.shape
        # Rebuild positions over the full target range; only the shape/batch of the
        # incoming position_ids is reused.
        position_ids = (
            torch.arange(1, self.max_target_positions).to(position_ids.device).repeat(1, 1)
        )
        main_relative_buckets, predict_relative_buckets = compute_all_stream_relative_buckets(
            self.num_buckets, self.relative_max_distance, position_ids
        )
        # buffer relative buckets
        main_relative_buckets = main_relative_buckets[:, :sequence_length, :sequence_length].repeat(
            batch_size, 1, 1
        )
        # Predict stream attends to both the main positions and the shifted
        # predict positions, hence the concatenation along the last axis.
        predict_relative_buckets = torch.cat(
            [
                predict_relative_buckets[:, :sequence_length, :sequence_length],
                predict_relative_buckets[
                    :,
                    :sequence_length,
                    self.max_target_positions : self.max_target_positions + sequence_length,
                ],
            ],
            2,
        ).repeat(batch_size, 1, 1)
        return main_relative_buckets, predict_relative_buckets

    def prepare_attention_mask(self, hiddens, attention_mask):
        """Build the additive causal (+ padding) mask for the main stream.

        Returns a mask of shape (batch * num_decoder_attention_heads, seq, seq)
        in `hiddens.dtype`: -inf above the diagonal, -10000 on padded positions.
        """
        batch_size, seq_length = hiddens.shape[:2]
        # Strictly upper-triangular -inf mask forbids attending to future tokens.
        causal_mask = torch.full(
            (seq_length, seq_length),
            -float("inf"),
            dtype=hiddens.dtype,
            device=hiddens.device,
        )
        causal_mask = torch.triu(causal_mask, 1)
        extended_causal_mask = causal_mask[:seq_length, :seq_length][None, :, :].expand(
            (batch_size,) + causal_mask.shape
        )
        if attention_mask is not None:
            # Convert the 0/1 padding mask into an additive -10000 mask.
            extended_attention_mask = (1.0 - attention_mask[:, None, :]) * -10000.0
            extended_attention_mask = extended_causal_mask + extended_attention_mask
        else:
            extended_attention_mask = extended_causal_mask
        return extended_attention_mask.repeat(self.config.num_decoder_attention_heads, 1, 1).to(
            hiddens.dtype
        )

    def prepare_predict_attention_mask(self, hiddens,
            attention_mask):
        """Build the additive mask for the predicting (ngram) streams.

        Returns a mask of shape
        (ngram, batch * num_decoder_attention_heads, seq, 2 * seq) in
        `hiddens.dtype`; the second half of the last axis covers the main-stream
        positions the predict stream may attend to.
        """
        batch_size, seq_length = hiddens.shape[:2]
        # Causal bias over the full target range, precomputed by ngram_attention_bias.
        predict_causal_mask = ngram_attention_bias(
            self.max_target_positions, self.ngram, hiddens.device, hiddens.dtype
        )
        # Slice to the current length: predict positions attend to main positions
        # and to the shifted predict positions.
        predict_causal_mask = torch.cat(
            [
                predict_causal_mask[:, :seq_length, :seq_length],
                predict_causal_mask[
                    :,
                    :seq_length,
                    self.max_target_positions : self.max_target_positions + seq_length,
                ],
            ],
            dim=-1,
        )
        extended_predict_causal_mask = predict_causal_mask[:, None, :, :].expand(
            predict_causal_mask.shape[:1] + (batch_size,) + predict_causal_mask.shape[1:]
        )
        if attention_mask is not None:
            extended_attention_mask = (1.0 - attention_mask[None, :, None, :]) * -10000.0
            extended_attention_mask = extended_attention_mask.expand(
                (self.ngram, batch_size, seq_length, seq_length)
            )
            # predicted stream attention_mask should always be 0
            extended_attention_mask = torch.cat(
                [extended_attention_mask, torch.zeros_like(extended_attention_mask)], dim=-1
            )
            extended_predict_attention_mask = extended_predict_causal_mask + extended_attention_mask
        else:
            extended_predict_attention_mask = extended_predict_causal_mask
        return extended_predict_attention_mask.repeat(
            1, self.config.num_decoder_attention_heads, 1, 1
        ).to(hiddens.dtype)


class Model(PreTrained):
    """ProphetNet encoder-decoder with a shared input word embedding."""

    def __init__(self, config):
        super().__init__(config)
        # One embedding table shared by encoder and decoder.
        self.word_embeddings = qc.Embed(config.s_vocab, config.d_model, padding_idx=config.PAD)
        encoder_config = copy.deepcopy(config)
        encoder_config.is_enc_dec = False
        encoder_config.y_cache = False
        self.encoder = Encoder(encoder_config, self.word_embeddings)
        decoder_config = copy.deepcopy(config)
        decoder_config.is_decoder = True
        decoder_config.is_enc_dec = False
        self.decoder = Decoder(decoder_config, self.word_embeddings)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        encoder_outputs=None,
        caches=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        y_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        # Resolve per-call flags against the config defaults.
        y_cache = y_cache if y_cache is not None else self.config.y_cache
        output_attentions = (
            output_attentions if output_attentions is not None else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Run the encoder only when its outputs were not supplied by the caller
        # (e.g. reused across generation steps).
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # decoder outputs consists of (dec_features, caches, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            enc_hiddens=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            caches=caches,
            inputs_embeds=decoder_inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            y_cache=y_cache,
            return_dict=return_dict,
        )
        if not return_dict:
            return decoder_outputs + encoder_outputs
        return ProphetNetSeq2SeqModelOutput(
            y=decoder_outputs.y,
            last_hidden_state_ngram=decoder_outputs.last_hidden_state_ngram,
            caches=decoder_outputs.caches,
            hiddens=decoder_outputs.hiddens,
            decoder_ngram_hidden_states=decoder_outputs.hidden_states_ngram,
            attns=decoder_outputs.attns,
            decoder_ngram_attentions=decoder_outputs.ngram_attentions,
            crosses=decoder_outputs.crosses,
            enc_y=encoder_outputs.y,
            enc_hiddens=encoder_outputs.hiddens,
            enc_attns=encoder_outputs.attns,
        )


class ForCondGen(PreTrained):
    """ProphetNet seq2seq model with an LM head for conditional generation."""

    def __init__(self, config):
        super().__init__(config)
        self.prophetnet = Model(config)
        self.padding_idx = config.PAD
        # When set, only the first (next-token) stream contributes to the loss.
        self.disable_ngram_loss = config.disable_ngram_loss
        self.lm_head = qc.Linear(config.d_model, config.s_vocab, bias=False)

    def \
    forward(
        self,
        input_ids=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        encoder_outputs=None,
        caches=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        labels=None,
        y_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
            # get decoder inputs from shifting lm labels to the right
            decoder_input_ids = self._shift_right(labels)
        outputs = self.prophetnet(
            input_ids=input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            encoder_outputs=encoder_outputs,
            caches=caches,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            y_cache=y_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        batch_size, sequence_length = (
            decoder_input_ids.shape
            if decoder_input_ids is not None
            else decoder_inputs_embeds.shape[:2]
        )
        # outputs[1] holds the concatenated ngram streams; reshape so the ngram
        # axis is explicit before projecting to the vocabulary.
        predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1)
        predict_logits = self.lm_head(predicting_streams)
        # Stream 0 predicts the next token; streams 1.. are the further-ahead ngrams.
        logits = predict_logits[:, 0]
        logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None
        # To use .view in loss computation, make sure that logits is contiguous.
        if not logits.is_contiguous():
            logits = logits.contiguous()
        loss = None
        if labels is not None:
            # Loss covers all predicting streams (see _compute_loss).
            loss = self._compute_loss(predict_logits, labels)
        if not return_dict:
            all_logits = tuple(v for v in [logits, logits_ngram] if v is not None)
            return (
                (loss,) + all_logits + outputs[2:]
                if loss is not None
                else all_logits + outputs[2:]
            )
        else:
            return ProphetNetSeq2SeqLMOutput(
                loss=loss,
                logits=logits,
                logits_ngram=logits_ngram,
                caches=outputs.caches,
                hiddens=outputs.hiddens,
                decoder_ngram_hidden_states=outputs.decoder_ngram_hidden_states,
                attns=outputs.attns,
                decoder_ngram_attentions=outputs.decoder_ngram_attentions,
                crosses=outputs.crosses,
                enc_y=outputs.enc_y,
                enc_hiddens=outputs.enc_hiddens,
                enc_attns=outputs.enc_attns,
            )

    def _compute_loss(self, logits, labels, ignore_index=-100):
        """NLL loss over all ngram streams with optional label smoothing (config.eps).

        `logits` has shape (batch, ngram, seq, vocab); `labels` is (batch, seq).
        Labels are replicated per stream unless `disable_ngram_loss`, in which
        case streams > 0 keep `ignore_index` and drop out of the loss.
        """
        expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(
            ignore_index
        )
        for i in range(self.config.ngram):
            if i > 0 and self.disable_ngram_loss:
                break
            expend_targets[i, :, :] = labels
        # (batch, ngram, ...) -> (ngram, batch, ...) to line up with expend_targets.
        logits = logits.transpose(0, 1).contiguous()
        # Softmax in float32 for numerical stability.
        lprobs = F.log_softmax(
            logits.view(-1, logits.size(-1)),
            dim=-1,
            dtype=torch.float32,
        )
        loss = F.nll_loss(lprobs, expend_targets.view(-1), reduction="mean")
        if self.config.eps > 0.0:
            # Label smoothing: mix in the mean negative log-prob over non-masked tokens.
            smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
            non_masked_tokens = expend_targets.ne(ignore_index).view(-1)
            smooth_loss = smooth_loss[non_masked_tokens]
            smooth_loss = smooth_loss.mean()
            eps_i = self.config.eps / lprobs.size(-1)
            loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss
        return loss


class ForCausal(PreTrained):
    """Decoder-only ProphetNet with an LM head (causal language modeling)."""

    def __init__(self, config):
        # set config for CLM
        config = copy.deepcopy(config)
        config.is_decoder = True
        config.is_enc_dec = False
        super().__init__(config)
        self.prophetnet = ProphetNetDecoderWrapper(config)
        self.padding_idx = config.PAD
        # When set, only the first (next-token) stream contributes to the loss.
        self.disable_ngram_loss = config.disable_ngram_loss
        self.lm_head = qc.Linear(config.d_model, config.s_vocab, bias=False)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        enc_hiddens=None,
        encoder_attention_mask=None,
        head_mask=None,
        cross_attn_head_mask=None,
        caches=None,
        inputs_embeds=None,
        labels=None,
        y_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # decoder outputs consists of (dec_features, caches, dec_hidden, dec_attn)
        outputs = self.prophetnet.decoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            enc_hiddens=enc_hiddens,
            encoder_attention_mask=encoder_attention_mask,
            head_mask=head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            caches=caches,
            inputs_embeds=inputs_embeds,
            y_cache=y_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        batch_size, sequence_length = (
            input_ids.shape if input_ids is not None else inputs_embeds.shape[:2]
        )
        # outputs[1] holds the concatenated ngram streams; make the ngram axis
        # explicit before the vocabulary projection.
        predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1)
        predict_logits = self.lm_head(predicting_streams)
        # Stream 0 predicts the next token; streams 1.. are the further-ahead ngrams.
        logits = predict_logits[:, 0]
        logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None
        loss = None
        if labels is not None:
            loss = self._compute_loss(predict_logits, labels)
        if not return_dict:
            all_logits = tuple(v for v in [logits, logits_ngram] if v is not None)
            return (
                (loss,) + all_logits + outputs[2:]
                if loss is not None
                else all_logits + outputs[2:]
            )
        else:
            return ProphetNetDecoderLMOutput(
                loss=loss,
                logits=logits,
                logits_ngram=logits_ngram,
                caches=outputs.caches,
                hiddens=outputs.hiddens,
                hidden_states_ngram=outputs.hidden_states_ngram,
                attns=outputs.attns,
                ngram_attentions=outputs.ngram_attentions,
                crosses=outputs.crosses,
            )

    def _compute_loss(self, logits, labels, ignore_index=-100):
        """NLL loss over all ngram streams with optional label smoothing.

        Mirrors ForCondGen._compute_loss: labels are replicated per stream unless
        `disable_ngram_loss`, in which case streams > 0 stay at `ignore_index`.
        """
        expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(
            ignore_index
        )
        for i in range(self.config.ngram):
            if i > 0 and self.disable_ngram_loss:
                break
            expend_targets[i, :, :] = labels
        # (batch, ngram, ...) -> (ngram, batch, ...) to line up with expend_targets.
        logits = logits.transpose(0, 1).contiguous()
        lprobs = \
        F.log_softmax(
            logits.view(-1, logits.size(-1)),
            dim=-1,
            dtype=torch.float32,
        )
        loss = F.nll_loss(lprobs, expend_targets.view(-1), reduction="mean")
        if self.config.eps > 0.0:
            # Label smoothing: mix in the mean negative log-prob over non-masked tokens.
            smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
            non_masked_tokens = expend_targets.ne(ignore_index).view(-1)
            smooth_loss = smooth_loss[non_masked_tokens]
            smooth_loss = smooth_loss.mean()
            eps_i = self.config.eps / lprobs.size(-1)
            loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss
        return loss


class ProphetNetDecoderWrapper(PreTrained):
    """Thin wrapper exposing a bare Decoder so ForCausal can reach it as
    `self.prophetnet.decoder`; forward delegates unchanged."""

    def __init__(self, config):
        super().__init__(config)
        self.decoder = Decoder(config)

    def forward(self, *args, **kw):
        # Pure pass-through to the wrapped decoder.
        return self.decoder(*args, **kw)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,412
quantapix/qnarre
refs/heads/main
/qnarre/models/gpt2.py
# Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= # https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf # https://openai.com/blog/better-language-models/ from dataclasses import dataclass import torch from torch import nn from torch.nn import functional as F from torch.cuda.amp.autocast_mode import autocast from transformers.utils import logging from torch.utils.checkpoint import checkpoint from .. 
import core as qc from ..core import utils as qu from ..core import output as qo from ..core import forward as qf from ..core import mlp as qm from ..core import attention as qa from ..core.mlp import Classifier from ..prep.config.gpt2 import PreTrained log = logging.get_logger(__name__) class LMHead(PreTrained): def __init__(self, **kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.model = Model(**kw) self.proj = qc.Linear(cfg.n_embed, cfg.s_vocab, bias=False, **kw) def forward(self, x, labels=None, **kw): ys = self.model(x, **kw) y = self.proj(ys[0]) loss = None if labels is not None: sl = y[..., :-1, :].contiguous() ls = labels[..., 1:].contiguous() loss = nn.CrossEntropyLoss()(sl.view(-1, sl.size(-1)), ls.view(-1)) ys = (y,) + ys[1:] + (loss,) return qo.LossCrosses(*ys) @dataclass class Output(qc.Output): logits: tuple = None mc_logits: tuple = None attns: tuple = None caches: tuple = None hiddens: tuple = None loss: tuple = None mc_loss: tuple = None class DualHead(PreTrained): def __init__(self, **kw): super().__init__(**kw) cfg = self.get_cfg(kw) cfg.n_labels = 1 self.model = Model(**kw) self.sum = qc.SeqSummary(**kw) self.proj = qc.Linear(cfg.n_embed, cfg.s_vocab, bias=False, **kw) def forward(self, x, mc_token_ids=None, labels=None, mc_labels=None, **kw): ys = self.model(x, **kw) y = self.proj(ys[0]) loss = None if labels is not None: sl = y[..., :-1, :].contiguous() ls = labels[..., 1:].contiguous() loss = nn.CrossEntropyLoss()(sl.view(-1, sl.size(-1)), ls.view(-1)) mc_y = self.sum(ys[0], mc_token_ids).squeeze(-1) mc_loss = None if mc_labels is not None: mc_loss = nn.CrossEntropyLoss()(mc_y.view(-1, mc_y.size(-1)), mc_labels.view(-1)) ys = (y, mc_y) + ys[1:] + (loss, mc_loss) return Output(*ys) class ForSeqClass(PreTrained): def __init__(self, **kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.model = Model(**kw) self.proj = qc.Linear(cfg.n_embed, cfg.n_labels, bias=False, **kw) forward = qf.forward_seq def post_proj(self, x): cfg = self.cfg 
b = (x.shape[:2] if x is not None else x_emb.shape[:2])[0] if cfg.PAD is None: n = -1 else: assert b == 1 n = -1 if x is None else torch.ne(x, cfg.PAD).sum(-1) - 1 return x[torch.arange(b, device=self.device), n] class ForTokClass(PreTrained): def __init__(self, drop_proj=0.1, **kw): super().__init__(**kw) self.get_cfg(kw) self.model = Model(**kw) self.proj = Classifier(drop_proj=drop_proj, **kw) forward = qf.forward_tok class Model(PreTrained): def __init__(self, **kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.tok_emb = qc.Embed(cfg.s_vocab, cfg.d_model, **kw) self.pos_emb = qc.Embed(cfg.n_pos, cfg.d_model, **kw) self.lays = qc.Stack([Layer(lay_i=i, **kw) for i in range(cfg.n_lays)]) self.norm = qc.LayerNorm(cfg.d_model, cfg.eps, **kw) self.drop = qc.Dropout(cfg.drop_embed, **kw) def forward( self, x, cache=None, enc_m=None, enc=None, head_m=None, mask=None, pos=None, typ=None, x_emb=None, **kw, ): cfg = self.cfg if x is None: s, d = x_emb.size()[:-1], x_emb.device else: assert x_emb is None s, d = x.size(), x.device x = x.view(-1, s[-1]) if x_emb is None: x_emb = self.tok_emb(x) if cache is None: c_len = 0 cache = tuple([None] * len(self.lays)) else: c_len = cache[0][0].size(-2) if mask is not None: mask = self.get_mask(mask.view(s[0], -1), s, d) if pos is not None: pos = pos.view(-1, s[-1]) else: pos = ( torch.arange(c_len, s[-1] + c_len, dtype=torch.long, device=d) .unsqueeze(0) .view(-1, s[-1]) ) if typ is not None: typ = typ.view(-1, s[-1]) if cfg.add_cross and enc is not None: if enc_m is None: enc_m = torch.ones(enc.size()[:2], device=d) enc_m = self.invert_mask(enc_m) else: enc_m = None head_m = self.get_head_m(head_m, cfg.n_lays) y = x_emb + self.pos_emb(pos) if typ is not None: y = y + self.tok_emb(typ) y = self.drop(y) attns = caches = crosses = hiddens = () for i, (lay, c) in enumerate(zip(self.lays, cache)): hiddens += (y,) kw.update(enc_m=enc_m, enc=enc, head_m=head_m[i], mask=mask) if self.grad_checkpoint and self.training: def 
create_forward(x): def forward(*xs): return x(*xs, cache=c) return forward ys = checkpoint(create_forward(lay), y, **kw) else: ys = lay(y, cache=c, **kw) y = ys[0] attns += (ys[2],) if cfg.add_cross: crosses += (ys[3],) caches += (ys[1],) y = self.norm(y).view(s + (y.size(-1),)) hiddens += (y,) return qo.CachesCrosses(y, attns, caches, crosses, hiddens) class Layer(qc.Module): hs = qc.Hypers({"d_model", "add_cross", "n_inner"}) def __init__(self, lay_i, ps={}, hs=[], **kw): super().__init__(ps, [self.hs] + hs, **kw) cfg = self.get_cfg(kw) d = cfg.d_model self.attn = Attention(lay_i=lay_i, **kw) self.norm_attn = qc.LayerNorm(d, **kw) if cfg.add_cross: self.cross = Attention(is_cross=True, lay_i=lay_i, **kw) self.norm_cross = qc.LayerNorm(d, **kw) self.proj = qm.GPT(cfg.n_inner if cfg.n_inner is not None else 4 * d, **kw) self.norm = qc.LayerNorm(d, **kw) def forward(self, x, cache=None, enc_m=None, enc=None, head_m=None, mask=None, **kw): y = self.norm_attn(x) y, a, kv = self.attn(y, cache=cache, head_m=head_m, mask=mask, **kw) y = x + y a2 = None if enc is not None: x = y y = self.norm_cross(y) y, a2, kv2 = self.cross(y, enc_m=enc_m, enc=enc, head_m=head_m, mask=mask, **kw) y = x + y kv = kv + kv2 x = y return x + self.proj(self.norm(y)), a, a2, kv class Attention(qc.Module): hs = qc.Hypers({"d_model", "drop_attn", "drop", "n_heads", "n_pos", "scale", "scale_by_inv"}) def __init__(self, is_cross=False, lay_i=None, ps={}, hs=[], **kw): super().__init__(ps, [self.hs] + hs, **kw) self.is_cross = is_cross self.lay_i = lay_i cfg = self.get_cfg(kw) d, h = cfg.d_model, cfg.n_heads assert d % h == 0 cfg.s_head = int(d / h) if is_cross: self.attn = qc.Conv1D(2 * d, d, **kw) self.query = qc.Conv1D(d, d, **kw) else: self.attn = qc.Conv1D(3 * d, d, **kw) self.proj = qc.Conv1D(d, d, **kw) self.drop_attn = qc.Dropout(cfg.drop_attn, **kw) self.drop = qc.Dropout(cfg.drop, **kw) p, t = cfg.n_pos, torch.bool self.register_buffer("bias", torch.tril(torch.ones((p, p), 
dtype=t)).view(1, 1, p, p)) # self.register_buffer("bias_m", torch.tensor(-1e4)) def forward(self, x, cache=None, enc_m=None, enc=None, head_m=None, mask=None, **kw): cfg = self.cfg if enc is None: q, k, v = self.attn(x).split(cfg.d_model, dim=2) else: q = self.query(x) k, v = self.attn(enc).split(cfg.d_model, dim=2) mask = enc_m q = self.split_heads(q) k = self.split_heads(k) v = self.split_heads(v) if cache is not None: k = torch.cat((cache[0], k), dim=-2) v = torch.cat((cache[1], v), dim=-2) if cfg.reorder: ys = self.reordered(q, k, v, mask, head_m) else: ys = self.scores(q, k, v, mask, head_m) y = self.join_heads(ys[0]) y = (self.drop(self.proj(y)),) y += ys[1:] + ((k, v),) return y split_heads = qa.split_heads join_heads = qa.join_heads def scores(self, q, k, v, mask, head_m, **kw): cfg = self.cfg a = torch.matmul(q, k.transpose(-1, -2)) if cfg.scale: a = a / torch.full([], v.size(-1) ** 0.5, dtype=a.dtype, device=a.device) if cfg.scale_by_inv: a = a / float(self.lay_i + 1) if not self.is_cross: n_q, n_k = q.size(-2), k.size(-2) causal = self.bias[:, :, n_k - n_q : n_k, :n_k].bool() m = torch.tensor(torch.finfo(a.dtype).min, dtype=a.dtype).to(a.device) a = torch.where(causal, a, m) if mask is not None: a = a + mask a = self.drop_attn(F.softmax(a, dim=-1).type(v.dtype)) if head_m is not None: a = a * head_m return torch.matmul(a, v), a def reordered(self, q, k, v, mask, head_m, **kw): cfg = self.cfg b, h, n_q, d = q.size() _, _, n_k, _ = k.size() a = torch.empty(b * h, n_q, n_k, dtype=torch.float32, device=q.device) alpha = 1.0 if cfg.scale: alpha /= float(v.size(-1)) ** 0.5 if cfg.scale_by_inv: alpha /= float(self.lay_i + 1) with autocast(enabled=False): q, k = q.reshape(-1, n_q, d), k.transpose(-1, -2).reshape(-1, d, n_k) a = torch.baddbmm(a, q.float(), k.float(), beta=0, alpha=alpha) a = a.reshape(b, h, n_q, n_k) if not self.is_cross: causal = self.bias[:, :, n_k - n_q : n_k, :n_k].bool() m = torch.tensor(torch.finfo(a.dtype).min, dtype=a.dtype).to(a.device) 
a = torch.where(causal, a, m) if mask is not None: a = a + mask a = self.drop_attn(F.softmax(a, dim=-1).type(v.dtype)) if head_m is not None: a = a * head_m return torch.matmul(a, v), a
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,413
quantapix/qnarre
refs/heads/main
/qnarre/core/attention.py
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

import math

import torch
import numpy as np

from torch import nn
from torch.nn import functional as F
from torch.nn.parameter import Parameter, UninitializedBuffer

from .. import core as qc
from . import utils as qu


def split_heads(self, x, k=False):
    """Reshape the trailing feature dim into (n_heads, s_head) and move heads forward.

    With k=True the result is laid out for use as attention keys
    (batch, heads, s_head, seq) so that a plain matmul computes q @ k.
    NOTE(review): relies on cfg.s_head being set by the owning module — confirm.
    """
    cfg = self.cfg
    y = x.view(x.size()[:-1] + (cfg.n_heads, cfg.s_head))
    if k:
        return y.permute(0, 2, 3, 1)
    return y.permute(0, 2, 1, 3)


def join_heads(self, x):
    """Inverse of split_heads: fold (heads, s_head) back into one feature dim."""
    y = x.permute(0, 2, 1, 3).contiguous()
    cfg = self.cfg
    return y.view(y.size()[:-2] + (cfg.n_heads * cfg.s_head,))


class Attention(qc.Module):
    """Multi-head attention with optional packed (single-matrix) QKV projection."""

    hs = qc.Hypers(
        ["d_model", "n_heads", "d_k", "d_v"],
        {
            "add_b_kv": False,
            "add_zero_attn": False,
            "batch_first": False,
            "bias": True,
            "drop": 0.0,
        },
    )

    # BUG FIX: the original `w_pack, w_q, w_k, w_v = None` raises
    # "cannot unpack non-iterable NoneType" when the class body executes;
    # chained assignment is what was meant.
    w_pack = w_q = w_k = w_v = None
    b_pack = b_q = b_k = b_v = None

    def __init__(self, n_heads, d_model, hs=[], **kw):
        if n_heads is not None:
            kw.update(n_heads=n_heads)
        if d_model is not None:
            kw.update(d_model=d_model)
        super().__init__([self.hs] + hs, **kw)
        cfg = self.cfg
        n, h = cfg.n_heads, cfg.d_model
        assert h % n == 0
        d_k = cfg.d_k if cfg.d_k is not None else h
        d_v = cfg.d_v if cfg.d_v is not None else h
        # BUG FIX: original read `self.d_k`/`self.d_v`, which are never set;
        # the locals derived from cfg above are what was intended.
        self.pack = d_k == h and d_v == h
        kw = {"device": cfg.device, "dtype": cfg.dtype}
        if self.pack:
            # One fused (3h, h) matrix projects q, k and v in a single matmul.
            self.w_pack = Parameter(torch.empty((3 * h, h), **kw))
            self.register_parameter("w_q", None)
            self.register_parameter("w_k", None)
            self.register_parameter("w_v", None)
        else:
            self.register_parameter("w_pack", None)
            self.w_q = Parameter(torch.empty((h, h), **kw))
            self.w_k = Parameter(torch.empty((h, d_k), **kw))
            self.w_v = Parameter(torch.empty((h, d_v), **kw))
        if cfg.bias:
            self.b_pack = Parameter(torch.empty(3 * h, **kw))
        else:
            self.register_parameter("b_pack", None)
        # BUG FIX: bare `Linear` was an unresolved name; nn.Linear is the
        # torch layer in scope with the required signature.
        self.out = nn.Linear(h, h, bias=cfg.bias, **kw)
        if cfg.add_b_kv:
            self.b_k = Parameter(torch.empty((1, 1, h), **kw))
            self.b_v = Parameter(torch.empty((1, 1, h), **kw))
        else:
            self.register_parameter("b_k", None)
            self.register_parameter("b_v", None)

    def build(self, _):
        # Lazy one-time parameter initialization on first build.
        if not self.is_built():
            with torch.no_grad():
                self.reset_params()

    def reset_params(self):
        if self.pack:
            nn.init.xavier_uniform_(self.w_pack)
        else:
            nn.init.xavier_uniform_(self.w_q)
            nn.init.xavier_uniform_(self.w_k)
            nn.init.xavier_uniform_(self.w_v)
        if self.b_pack is not None:
            nn.init.constant_(self.b_pack, 0.0)
            nn.init.constant_(self.out.bias, 0.0)
        if self.b_k is not None:
            nn.init.xavier_normal_(self.b_k)
        if self.b_v is not None:
            nn.init.xavier_normal_(self.b_v)

    def forward(self, q, k, v, mask=None, k_mask=None, need_weights=True, average=True):
        """Run attention; inputs are (seq, batch, dim) unless cfg.batch_first."""
        cfg = self.cfg
        is_batched = q.dim() == 3
        if cfg.batch_first and is_batched:
            q, k, v = [x.transpose(1, 0) for x in (q, k, v)]
        # BUG FIX: the original read nonexistent `self.add_zero_attn` /
        # `self.batch_first` (the hypers live on cfg), and the two branches
        # passed positional arguments in different orders — the unpacked
        # branch slotted add_zero_attn into the `mask` position.  The pack
        # distinction is handled inside multi_head_attention_forward, so a
        # single keyword call is both correct and branch-free.
        y, w = self.multi_head_attention_forward(
            q,
            k,
            v,
            mask=mask,
            k_mask=k_mask,
            add_zero_attn=cfg.add_zero_attn,
            need_weights=need_weights,
            average=average,
        )
        if cfg.batch_first and is_batched:
            return y.transpose(1, 0), w
        return y, w

    def project_packed(self, q, k, v):
        """Project q, k, v through the fused weight, splitting per self/cross attention."""
        w, b = self.w_pack, self.b_pack
        if k is v:
            if q is k:
                # Self-attention: one matmul, chunk into the three projections.
                return F.linear(q, w, b).chunk(3, dim=-1)
            # Cross-attention with shared k/v source: split the weight q vs kv.
            H = q.size(-1)
            w_q, w_kv = w.split([H, H * 2])
            if b is None:
                b_q = b_kv = None
            else:
                b_q, b_kv = b.split([H, H * 2])
            return (F.linear(q, w_q, b_q),) + F.linear(k, w_kv, b_kv).chunk(2, dim=-1)
        # Fully distinct inputs: three separate projections.
        w_q, w_k, w_v = w.chunk(3)
        if b is None:
            b_q = b_k = b_v = None
        else:
            b_q, b_k, b_v = b.chunk(3)
        return F.linear(q, w_q, b_q), F.linear(k, w_k, b_k), F.linear(v, w_v, b_v)

    def project(self, q, k, v, bs):
        """Project q, k, v through the separate weights; bs is the (b_q, b_k, b_v) tuple."""
        w_q, w_k, w_v = self.w_q, self.w_k, self.w_v
        H, Dk, Dv = q.size(-1), k.size(-1), v.size(-1)
        assert w_q.shape == (H, H) and w_k.shape == (H, Dk) and w_v.shape == (H, Dv)
        b_q, b_k, b_v = bs
        assert b_q is None or b_q.shape == (H,)
        assert b_k is None or b_k.shape == (H,)
        assert b_v is None or b_v.shape == (H,)
        return F.linear(q, w_q, b_q), F.linear(k, w_k, b_k), F.linear(v, w_v, b_v)

    def attention(self, q, k, v, mask=None):
        """Scaled dot-product attention core; mask is additive (float, -inf for off)."""
        cfg = self.cfg
        B, Nt, H = q.shape
        q = q / math.sqrt(H)
        w = torch.bmm(q, k.transpose(-2, -1))
        if mask is not None:
            w += mask
        # BUG FIX: `softmax` and `drop` were unresolved bare names, and
        # `cfg.dropout_p` is not a declared hyper (the hyper is `drop`).
        w = F.softmax(w, dim=-1)
        if self.training and cfg.drop > 0.0:
            w = F.dropout(w, p=cfg.drop)
        y = torch.bmm(w, v)
        return y, w

    def is_batched(self, q, k, v, k_mask, mask):
        """Validate input ranks; True for batched (3-D) inputs, False for 2-D."""
        if q.dim() == 3:
            assert k.dim() == 3 and v.dim() == 3
            if k_mask is not None:
                assert k_mask.dim() == 2
            if mask is not None:
                assert mask.dim() in (2, 3)
            return True
        assert q.dim() == 2
        assert k.dim() == 2 and v.dim() == 2
        if k_mask is not None:
            assert k_mask.dim() == 1
        if mask is not None:
            assert mask.dim() in (2, 3)
            if mask.dim() == 3:
                assert mask.shape == (self.cfg.n_heads, q.shape[0], k.shape[0])
        return False

    def multi_head_attention_forward(
        self,
        q,
        k,
        v,
        mask=None,
        k_mask=None,
        add_zero_attn=None,
        need_weights=True,
        static_k=None,
        static_v=None,
        average=True,
    ):
        """Full multi-head pipeline: project, add bias/zero keys, mask, attend, merge."""
        if not self.is_batched(q, k, v, k_mask, mask):
            # Promote unbatched inputs to batch size 1.
            q = q.unsqueeze(1)
            k = k.unsqueeze(1)
            v = v.unsqueeze(1)
            if k_mask is not None:
                k_mask = k_mask.unsqueeze(0)
        cfg = self.cfg
        h, n = cfg.d_model, cfg.n_heads
        b_q, b_k, b_v = self.b_q, self.b_k, self.b_v
        if self.pack:
            assert k.shape == v.shape
            q, k, v = self.project_packed(q, k, v)
        else:
            assert k.shape[:2] == v.shape[:2]
            if self.b_pack is None:
                b_q = b_k = b_v = None
            else:
                b_q, b_k, b_v = self.b_pack.chunk(3)
            q, k, v = self.project(q, k, v, (b_q, b_k, b_v))
        d_tgt, d_batch, _ = q.shape
        d_src, _, _ = k.shape
        if mask is not None:
            assert mask.is_floating_point() or mask.dtype == torch.bool
            if mask.dim() == 2:
                assert mask.shape == (d_tgt, d_src)
                mask = mask.unsqueeze(0)
            else:
                assert mask.shape == (d_batch * n, d_tgt, d_src)
        if b_k is not None and b_v is not None:
            # Learned bias key/value appended along the source axis.
            assert static_k is None
            assert static_v is None
            k = torch.cat([k, b_k.repeat(1, d_batch, 1)])
            v = torch.cat([v, b_v.repeat(1, d_batch, 1)])
            # BUG FIX: bare `pad` was an unresolved name; F.pad extends the
            # masks by one source position for the appended bias slot.
            if mask is not None:
                mask = F.pad(mask, (0, 1))
            if k_mask is not None:
                k_mask = F.pad(k_mask, (0, 1))
        else:
            assert b_k is None
            assert b_v is None
        d_head = h // n
        q = q.contiguous().view(d_tgt, d_batch * n, d_head).transpose(0, 1)
        if static_k is None:
            k = k.contiguous().view(k.shape[0], d_batch * n, d_head).transpose(0, 1)
        else:
            assert static_k.size(0) == d_batch * n
            assert static_k.size(2) == d_head
            k = static_k
        if static_v is None:
            v = v.contiguous().view(v.shape[0], d_batch * n, d_head).transpose(0, 1)
        else:
            assert static_v.size(0) == d_batch * n
            assert static_v.size(2) == d_head
            v = static_v
        if add_zero_attn:
            # Append an all-zero key/value so every query can attend "nowhere".
            zero_attn_shape = (d_batch * n, 1, d_head)
            k = torch.cat([k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1)
            v = torch.cat([v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1)
            if mask is not None:
                mask = F.pad(mask, (0, 1))
            if k_mask is not None:
                k_mask = F.pad(k_mask, (0, 1))
        d_src = k.size(1)
        if k_mask is not None:
            assert k_mask.shape == (d_batch, d_src)
            k_mask = (
                k_mask.view(d_batch, 1, 1, d_src)
                .expand(-1, n, -1, -1)
                .reshape(d_batch * n, 1, d_src)
            )
            if mask is None:
                mask = k_mask
            elif mask.dtype == torch.bool:
                mask = mask.logical_or(k_mask)
            else:
                mask = mask.masked_fill(k_mask, float("-inf"))
        if mask is not None and mask.dtype == torch.bool:
            # Convert boolean mask to the additive float form attention() expects.
            mask = torch.zeros_like(mask, dtype=q.dtype).masked_fill_(mask, float("-inf"))
        # BUG FIX: `_scaled_dot_product_attention` was an unresolved name; the
        # class's own attention() implements exactly that contract.
        y, w = self.attention(q, k, v, mask)
        y = y.transpose(0, 1).contiguous().view(d_tgt, d_batch, h)
        y = F.linear(y, self.out.weight, self.out.bias)
        if need_weights:
            w = w.view(d_batch, n, d_tgt, d_src)
            if average:
                w = w.sum(dim=1) / n
            return y, w
        return y, None


class Attend(qc.Module):
    """Transformer-XL-style attention block with optional relative positions
    and a proximity bias, sharing positional biases with its owner."""

    hs = qc.Hypers(
        [
            "d_attn_k",
            "d_attn_v",
            "d_attn",
            "d_model",
            "drop_attn",
            "drop",
            "len_mem",
            "n_heads",
            "pos_type",
            "proxim_bias",
        ],
        {},
    )

    v_w = pos_tim = proxim_b = None

    def __init__(self, owner, hs=[], **kw):
        super().__init__([self.hs] + hs, **kw)
        self.owner = owner
        self.pre = owner.pre
        self.post = owner.post
        # Positional biases may be shared across layers via the owner.
        self.pos_x_b = owner.pos_x_b
        self.pos_p_b = owner.pos_p_b
        cfg = self.cfg
        h, n = cfg.d_model, cfg.n_heads
        assert h % n == 0
        k = cfg.d_attn_k or cfg.d_attn or h
        assert k % n == 0
        self.scale = 1 / (k**0.5)
        v = cfg.d_attn_v or k
        assert v % n == 0
        kw = {"dtype": cfg.dtype, "device": cfg.device}
        if k == v:
            # Same key/value width: one fused projection weight.
            self.qkv_w = Parameter(torch.empty((h, n * k), **kw))
        else:
            self.qk_w = Parameter(torch.empty((h, n * k), **kw))
            self.v_w = Parameter(torch.empty((h, n * v), **kw))
        self.out_w = Parameter(torch.empty((n * v, h), **kw))
        if cfg.pos_type == "relative":
            self.pos_tim = PosTiming(**kw)
            self.pos_w = Parameter(torch.empty((h, n * k), **kw))
            if self.pos_x_b is None:
                self.pos_x_b = Parameter(torch.empty((n, k), **kw))
            if self.pos_p_b is None:
                self.pos_p_b = Parameter(torch.empty((n, k), **kw))
        if cfg.proxim_bias:
            self.proxim_b = Proximity(**kw)

    def build(self, x):
        if not self.is_built():
            cfg = self.cfg
            with torch.no_grad():
                # NOTE(review): precedence makes e == 0 when len_mem is falsy,
                # discarding x.shape[1]; possibly `x.shape[1] + (len_mem or 0)`
                # was intended — confirm before relying on this path.
                e = x.shape[1] + cfg.len_mem if cfg.len_mem else 0
                if cfg.pos_type == "relative":
                    self.pos_tim.materialize(cfg.d_model, e)
                if cfg.proxim_bias:
                    self.proxim_b.materialize(e)

    def reset_params(self):
        if self.is_built():
            a = math.sqrt(5)
            if self.v_w is None:
                nn.init.kaiming_uniform_(self.qkv_w, a=a)
            else:
                nn.init.kaiming_uniform_(self.qk_w, a=a)
                nn.init.kaiming_uniform_(self.v_w, a=a)
            if self.pos_tim is not None:
                nn.init.kaiming_uniform_(self.pos_w, a=a)
                # Only init the biases this layer owns (not shared ones).
                if self.owner.pos_x_b is None:
                    nn.init.kaiming_uniform_(self.pos_x_b, a=a)
                if self.owner.pos_p_b is None:
                    nn.init.kaiming_uniform_(self.pos_p_b, a=a)

    split_heads = split_heads
    join_heads = join_heads

    def forward(self, x, mask=None):
        x, ctx = x[0], x[1] if len(x) > 1 else None
        xlen = x.shape[1]
        # Prepend memory/context along the sequence axis when present.
        y = x if ctx is None else torch.cat([ctx, x], dim=1)
        y = self.pre([y, y])
        if self.v_w is None:
            y = v = torch.einsum("bih,hk->bik", y, self.qkv_w)
        else:
            # BUG FIX: original computed the value projection from the
            # not-yet-bound name `v`; both projections read the shared
            # pre-normed activations, so project values first.
            v = torch.einsum("bih,hv->biv", y, self.v_w)
            y = torch.einsum("bih,hk->bik", y, self.qk_w)
        q = self.split_heads(y[:, -xlen:, :])
        k = self.split_heads(y)
        if self.pos_tim is None:
            qk = torch.einsum("bnik,bnjk->bnij", q, k)
        else:
            qk = self.to_qk_with_pos(q, k)
        v = self.split_heads(v)
        y = self.to_scores(qk, mask, v)
        y = self.join_heads(y)
        y = torch.einsum("biv,vh->bih", y, self.out_w)
        y = self.post([x, y])
        return y

    def to_qk_with_pos(self, q, k):
        """Content + position attention logits (Transformer-XL decomposition)."""
        b = self.pos_x_b[:, None, :, None]
        y = torch.einsum("bnik,bnjk->bnij", q + b, k)
        p = torch.einsum("ih,hk->ik", self.pos_tim, self.pos_w)
        # fmt: off
        p = self.split_heads(p)[None,]
        # fmt: on
        b = self.pos_p_b[:, None, :, None]
        p = torch.einsum("bnik,bnjk->bnij", q + b, p)
        y += self.shift(p)
        return y

    def shift(self, x):
        """Relative-shift trick: realign position logits per query offset."""
        s = x.shape
        # BUG FIX: torch has no top-level `pad`/`slice` (TF leftovers);
        # F.pad with a flat spec and tensor slicing are the torch equivalents.
        y = F.pad(x, (1, 0))
        y = torch.reshape(y, (s[0], s[1], s[3] + 1, s[2]))
        y = y[:, :, 1:, :]
        y = torch.reshape(y, s)
        return y

    def to_scores(self, qk, mask, v):
        """Apply additive mask/bias, softmax, dropout, then weight the values."""
        b = 0
        if mask is not None:
            b = torch.logical_not(mask)
            # BUG FIX: `torch.cast`/`torch.floatx` are TF idioms; .to() is the
            # torch conversion.  Disallowed positions get a large negative bias.
            b = b.to(qk.dtype) * qu.big_neg()
        if self.proxim_b is not None:
            b += self.proxim_b
            # NOTE(review): reshape placement inferred from the collapsed
            # original; confirm broadcast shape against callers.
            b = b[:, None, :, None]
        # BUG FIX: torch.softmax requires an explicit dim.
        y = torch.softmax(qk * self.scale + b, dim=-1)
        cfg = self.cfg
        # NOTE(review): self.drop assumed provided by qc.Module — confirm.
        y = self.drop(y, cfg.drop_attn or cfg.drop)
        y = torch.einsum("bnij,bnjv->bniv", y, v)
        return y


class Proximity(UninitializedBuffer):
    """Lazily-materialized distance bias: -log1p(|i - j|) per position pair."""

    def materialize(self, end, dtype=None, device=None):
        dtype = self.data.dtype if dtype is None else dtype
        device = self.data.device if device is None else device
        kw = {"dtype": dtype, "device": device}
        y = torch.arange(end, **kw)
        # fmt: off
        y = (y[None,] - y[:, None])
        y = -torch.log1p(torch.abs(y))
        self.data = y[None, None,]
        # fmt: on
        self.__class__ = self.cls_to_become


class PosTiming(UninitializedBuffer):
    """Lazily-materialized sinusoidal position encodings (reversed index order)."""

    def materialize(self, dim, end, dtype=None, device=None):
        dtype = self.data.dtype if dtype is None else dtype
        device = self.data.device if device is None else device
        kw = {"dtype": dtype, "device": device}
        t = torch.arange(end - 1, -1, -1.0, **kw)
        f = torch.arange(0, dim, 2.0, **kw)
        f = 1 / (10000 ** (f / dim))
        t = torch.einsum("i,d->id", t, f)
        self.data = torch.cat([torch.sin(t), torch.cos(t)], dim=-1)
        self.__class__ = self.cls_to_become


# BUG FIX: the original defined a second class also named PosTiming, which
# shadowed the one above — yet Attend.build calls materialize(d_model, e),
# matching the 2-positional-arg signature above and crashing on this one's
# required p_max/p_min/p_start.  Renamed so both variants stay available.
class PosTimingLog(UninitializedBuffer):
    """Sinusoidal encodings with a log-spaced frequency band [p_min, p_max]."""

    def materialize(self, dim, end, p_max, p_min, p_start, dtype=None, device=None):
        dtype = self.data.dtype if dtype is None else dtype
        device = self.data.device if device is None else device
        kw = {"dtype": dtype, "device": device}
        t = torch.arange(end, **kw) + p_start
        assert dim % 2 == 0
        n = dim // 2
        f = np.log(p_max / p_min) / max(n - 1, 1)
        f = torch.arange(n, **kw) * -f
        f = torch.exp(f) * p_min
        t = torch.einsum("i,d->id", t, f)
        self.data = torch.cat([torch.sin(t), torch.cos(t)], dim=-1)
        self.__class__ = self.cls_to_become
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,414
quantapix/qnarre
refs/heads/main
/qnarre/models/old/convert.py
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Smoke-test checkpoint round-tripping for the MNIST model.

Trains a single batch, restores the model from ``dir_model/<model>``,
re-saves it under ``dir_model/mnist_2`` and reloads it from there.
"""

import pathlib as pth

import qnarre.core.utils as utils

from qnarre.core.models import pt
from qnarre.core.mnist import dset_for, model_for, params

TRAIN = "train"


def main(_):
    """Train one batch, then save/restore through two checkpoint dirs.

    NOTE(review): ``tf`` below is never imported (only ``pt`` is); as
    written every ``tf.*`` call raises NameError at runtime. Confirm
    whether the TensorFlow module or the ``pt`` wrapper was intended —
    the fix cannot be guessed safely from this file alone.
    """
    ps = utils.Params(params).init_comps()
    ds = dset_for(ps, TRAIN)
    # with T.distribute.MirroredStrategy().scope():
    mdl = model_for(ps, compiled=True)
    mdl.train_on_batch(ds)
    # First checkpoint dir: dir_model/<model>.
    mp = pth.Path.cwd() / ps.dir_model / ps.model
    assert tf.get_checkpoint_state(str(mp))
    mdl.load_weights(str(mp / TRAIN))
    c = tf.Checkpoint(model=mdl, optimizer=ps.optimizer)
    c.restore(str(mp / TRAIN)).expect_partial()  # .assert_consumed()
    for n, s in tf.list_variables(str(mp)):
        print(n)
    # Second checkpoint dir: copy the restored state and reload it.
    mp2 = pth.Path.cwd() / ps.dir_model / "mnist_2"
    print("saving...")
    c.save(str(mp2 / TRAIN))
    for n, s in tf.list_variables(str(mp2)):
        print(n)
    assert tf.get_checkpoint_state(str(mp2))
    # ``save`` numbers checkpoints, hence the "-1" suffix on first save.
    mdl.load_weights(str(mp2 / "train-1"))


if __name__ == "__main__":
    from absl import app, logging

    logging.set_verbosity(logging.INFO)  # DEBUG
    app.run(main)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,415
quantapix/qnarre
refs/heads/main
/qnarre/prep/convert/mbart.py
# Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import torch from argparse import ArgumentParser from torch import nn from ..config.mbart import PreTrained from ...run.mbart import ForConditionalGen def make_linear_from_emb(x): s_vocab, emb_size = x.weight.shape y = nn.Linear(s_vocab, emb_size, bias=False) y.weight.data = x.weight.data return y def to_pytorch(src_path, cfg_path, save_path, finetuned=False, mbart_50=False): d = torch.load(src_path, map_location="cpu")["model"] for k in _IGNORE: d.pop(k, None) s_vocab = d["encoder.embed_tokens.weight"].shape[0] cfg = PreTrained.from_pretrained(cfg_path, s_vocab=s_vocab) if mbart_50 and finetuned: cfg.act_fun = "relu" print(f"Building from config: {cfg}") d["shared.weight"] = d["decoder.embed_tokens.weight"] m = ForConditionalGen(cfg) m.model.load_state_dict(d) if finetuned: m.lm_head = make_linear_from_emb(m.model.shared) print(f"Saving to: {save_path}") torch.save(m.state_dict(), save_path) _IGNORE = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", "decoder.output_projection.weight", ] if __name__ == "__main__": x = ArgumentParser() x.add_argument("--src_path", type=str) x.add_argument("--cfg_path", default="facebook/mbart-large-cc25", type=str) x.add_argument("--save_path", default=None, type=str) x.add_argument("--finetuned", 
action="store_true") x.add_argument("--mbart_50", action="store_true") y = x.parse_args() to_pytorch(y.src_path, y.cfg_path, y.finetuned, y.mbart_50)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,416
quantapix/qnarre
refs/heads/main
/qnarre/try/norm.py
""" Layer Normalization ==================== In this tutorial, you will write a high-performance layer normalization kernel that runs faster than the PyTorch implementation. In doing so, you will learn about: * Implementing backward pass in Triton. * Implementing parallel reduction in Triton. """ # %% # Motivations # ----------- # # The *LayerNorm* operator was first introduced in [BA2016]_ as a way to improve the performance # of sequential models (e.g., Transformers) or neural networks with small batch size. # It takes a vector :math:`x` as input and produces a vector :math:`y` of the same shape as output. # The normalization is performed by subtracting the mean and dividing by the standard deviation of :math:`x`. # After the normalization, a learnable linear transformation with weights :math:`w` and biases :math:`b` is applied. # The forward pass can be expressed as follows: # # .. math:: # y = \frac{ x - \text{E}[x] }{ \sqrt{\text{Var}(x) + \epsilon} } * w + b # # where :math:`\epsilon` is a small constant added to the denominator for numerical stability. # Let’s first take a look at the forward pass implementation. import torch import triton import triton.language as tl try: # This is https://github.com/NVIDIA/apex, NOT the apex on PyPi, so it # should not be added to extras_require in setup.py. import apex HAS_APEX = True except ModuleNotFoundError: HAS_APEX = False @triton.jit def _layer_norm_fwd_fused( X, # pointer to the input Y, # pointer to the output W, # pointer to the weights B, # pointer to the biases Mean, # pointer to the mean Rstd, # pointer to the 1/std stride, # how much to increase the pointer when moving by 1 row N, # number of columns in X eps, # epsilon to avoid division by zero BLOCK_SIZE: tl.constexpr, ): # Map the program id to the row of X and Y it should compute. 
row = tl.program_id(0) Y += row * stride X += row * stride # Compute mean mean = 0 _mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32) for off in range(0, N, BLOCK_SIZE): cols = off + tl.arange(0, BLOCK_SIZE) a = tl.load(X + cols, mask=cols < N, other=0.).to(tl.float32) _mean += a mean = tl.sum(_mean, axis=0) / N # Compute variance _var = tl.zeros([BLOCK_SIZE], dtype=tl.float32) for off in range(0, N, BLOCK_SIZE): cols = off + tl.arange(0, BLOCK_SIZE) x = tl.load(X + cols, mask=cols < N, other=0.).to(tl.float32) x = tl.where(cols < N, x - mean, 0.) _var += x * x var = tl.sum(_var, axis=0) / N rstd = 1 / tl.sqrt(var + eps) # Write mean / rstd tl.store(Mean + row, mean) tl.store(Rstd + row, rstd) # Normalize and apply linear transformation for off in range(0, N, BLOCK_SIZE): cols = off + tl.arange(0, BLOCK_SIZE) mask = cols < N w = tl.load(W + cols, mask=mask) b = tl.load(B + cols, mask=mask) x = tl.load(X + cols, mask=mask, other=0.).to(tl.float32) x_hat = (x - mean) * rstd y = x_hat * w + b # Write output tl.store(Y + cols, y, mask=mask) # %% # Backward pass # ------------- # # The backward pass for the layer normalization operator is a bit more involved than the forward pass. # Let :math:`\hat{x}` be the normalized inputs :math:`\frac{ x - \text{E}[x] }{ \sqrt{\text{Var}(x) + \epsilon} }` before the linear transformation, # the Vector-Jacobian Products (VJP) :math:`\nabla_{x}` of :math:`x` are given by: # # .. math:: # \nabla_{x} = \frac{1}{\sigma}\Big( \nabla_{y} \odot w - \underbrace{ \big( \frac{1}{N} \hat{x} \cdot (\nabla_{y} \odot w) \big) }_{c_1} \odot \hat{x} - \underbrace{ \frac{1}{N} \nabla_{y} \cdot w }_{c_2} \Big) # # where :math:`\odot` denotes the element-wise multiplication, :math:`\cdot` denotes the dot product, and :math:`\sigma` is the standard deviation. # :math:`c_1` and :math:`c_2` are intermediate constants that improve the readability of the following implementation. 
#
# For the weights :math:`w` and biases :math:`b`, the VJPs :math:`\nabla_{w}` and :math:`\nabla_{b}` are more straightforward:
#
# .. math::
#    \nabla_{w} = \nabla_{y} \odot \hat{x} \quad \text{and} \quad \nabla_{b} = \nabla_{y}
#
# Since the same weights :math:`w` and biases :math:`b` are used for all rows in the same batch, their gradients need to sum up.
# To perform this step efficiently, we use a parallel reduction strategy: each kernel instance accumulates
# partial :math:`\nabla_{w}` and :math:`\nabla_{b}` across certain rows into one of :math:`\text{GROUP_SIZE_M}` independent buffers.
# These buffers stay in the L2 cache and then are further reduced by another function to compute the actual :math:`\nabla_{w}` and :math:`\nabla_{b}`.
#
# Let the number of input rows :math:`M = 4` and :math:`\text{GROUP_SIZE_M} = 2`,
# here's a diagram of the parallel reduction strategy for :math:`\nabla_{w}` (:math:`\nabla_{b}` is omitted for brevity):
#
# .. image:: parallel_reduction.png
#
# In Stage 1, the rows of X that have the same color share the same buffer and thus a lock is used to ensure that only one kernel instance writes to the buffer at a time.
# In Stage 2, the buffers are further reduced to compute the final :math:`\nabla_{w}` and :math:`\nabla_{b}`.
# In the following implementation, Stage 1 is implemented by the function :code:`_layer_norm_bwd_dx_fused` and Stage 2 is implemented by the function :code:`_layer_norm_bwd_dwdb`.


@triton.jit
def _layer_norm_bwd_dx_fused(
    DX,  # pointer to the input gradient
    DY,  # pointer to the output gradient
    DW,  # pointer to the partial sum of weights gradient
    DB,  # pointer to the partial sum of biases gradient
    X,  # pointer to the input
    W,  # pointer to the weights
    B,  # pointer to the biases
    Mean,  # pointer to the mean
    Rstd,  # pointer to the 1/std
    Lock,  # pointer to the lock
    stride,  # how much to increase the pointer when moving by 1 row
    N,  # number of columns in X
    eps,  # epsilon to avoid division by zero (unused here; kept for launch symmetry)
    GROUP_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr
):
    """Stage 1 of the backward pass: one program instance per row.

    Computes dx for its row and accumulates partial dw/db sums into one of
    GROUP_SIZE_M shared buffers, serialized per buffer by a spin lock.
    Assumes BLOCK_SIZE_N >= N (the whole row fits in a single block).
    """
    # Map the program id to the elements of X, DX, and DY it should compute.
    row = tl.program_id(0)
    cols = tl.arange(0, BLOCK_SIZE_N)
    mask = cols < N
    X += row * stride
    DY += row * stride
    DX += row * stride
    # Offset locks and weights/biases gradient pointer for parallel reduction:
    # rows with the same (row % GROUP_SIZE_M) share one buffer and one lock.
    lock_id = row % GROUP_SIZE_M
    Lock += lock_id
    # The second half of the lock array holds per-buffer "initialized" counters.
    Count = Lock + GROUP_SIZE_M
    DW = DW + lock_id * N + cols
    DB = DB + lock_id * N + cols
    # Load data to SRAM
    x = tl.load(X + cols, mask=mask, other=0).to(tl.float32)
    dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32)
    w = tl.load(W + cols, mask=mask).to(tl.float32)
    mean = tl.load(Mean + row)
    rstd = tl.load(Rstd + row)
    # Compute dx per the VJP formula above; c1, c2 are the two row-wide
    # reductions, and masked lanes are zeroed so they do not affect them.
    xhat = (x - mean) * rstd
    wdy = w * dy
    xhat = tl.where(mask, xhat, 0.)
    wdy = tl.where(mask, wdy, 0.)
    c1 = tl.sum(xhat * wdy, axis=0) / N
    c2 = tl.sum(wdy, axis=0) / N
    dx = (wdy - (xhat * c1 + c2)) * rstd
    # Write dx
    tl.store(DX + cols, dx, mask=mask)
    # Accumulate partial sums for dw/db
    partial_dw = (dy * xhat).to(w.dtype)
    partial_db = (dy).to(w.dtype)
    # Spin until this program acquires its buffer's lock.
    while tl.atomic_cas(Lock, 0, 1) == 1:
        pass
    count = tl.load(Count)
    # First store doesn't accumulate: the buffer holds uninitialized memory.
    if count == 0:
        tl.atomic_xchg(Count, 1)
    else:
        partial_dw += tl.load(DW, mask=mask)
        partial_db += tl.load(DB, mask=mask)
    tl.store(DW, partial_dw, mask=mask)
    tl.store(DB, partial_db, mask=mask)
    # Release the lock
    tl.atomic_xchg(Lock, 0)


@triton.jit
def _layer_norm_bwd_dwdb(
    DW,  # pointer to the partial sum of weights gradient
    DB,  # pointer to the partial sum of biases gradient
    FINAL_DW,  # pointer to the weights gradient
    FINAL_DB,  # pointer to the biases gradient
    M,  # GROUP_SIZE_M
    N,  # number of columns
    BLOCK_SIZE_M: tl.constexpr,
    BLOCK_SIZE_N: tl.constexpr
):
    """Stage 2 of the backward pass: reduce the M partial-sum buffers
    column-wise into the final dw/db; each program handles BLOCK_SIZE_N columns.
    """
    # Map the program id to the elements of DW and DB it should compute.
    pid = tl.program_id(0)
    cols = pid * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    dw = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    db = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    # Iterate through the rows of DW and DB to sum the partial sums.
    for i in range(0, M, BLOCK_SIZE_M):
        rows = i + tl.arange(0, BLOCK_SIZE_M)
        mask = (rows[:, None] < M) & (cols[None, :] < N)
        offs = rows[:, None] * N + cols[None, :]
        dw += tl.load(DW + offs, mask=mask, other=0.)
        db += tl.load(DB + offs, mask=mask, other=0.)
    # Write the final sum to the output.
    sum_dw = tl.sum(dw, axis=0)
    sum_db = tl.sum(db, axis=0)
    tl.store(FINAL_DW + cols, sum_dw, mask=cols < N)
    tl.store(FINAL_DB + cols, sum_db, mask=cols < N)


# %%
# Benchmark
# ---------
#
# We can now compare the performance of our kernel against that of PyTorch.
# Here we focus on inputs that have less than 64KB per feature.
# Specifically, one can set :code:`'mode': 'backward'` to benchmark the backward pass.
class LayerNorm(torch.autograd.Function):
    """Autograd wrapper around the fused Triton layer-norm kernels."""

    @staticmethod
    def forward(ctx, x, normalized_shape, weight, bias, eps):
        # Collapse all leading dimensions so the kernel sees a 2D problem.
        x_2d = x.reshape(-1, x.shape[-1])
        M, N = x_2d.shape
        out = torch.empty_like(x)
        mean = torch.empty((M, ), dtype=torch.float32, device='cuda')
        rstd = torch.empty((M, ), dtype=torch.float32, device='cuda')
        # The fused kernel keeps a whole row resident, so the feature
        # dimension must fit within 64KB.
        max_fused = 65536 // x.element_size()
        BLOCK_SIZE = min(max_fused, triton.next_power_of_2(N))
        if N > BLOCK_SIZE:
            raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
        # Warp-count heuristic: roughly one warp per 256 elements, clamped to [1, 8].
        num_warps = min(max(BLOCK_SIZE // 256, 1), 8)
        # One program instance per row.
        _layer_norm_fwd_fused[(M,)](
            x_2d, out, weight, bias, mean, rstd,
            x_2d.stride(0), N, eps,
            BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps,
        )
        # Stash everything the backward pass needs.
        ctx.save_for_backward(x, weight, bias, mean, rstd)
        ctx.BLOCK_SIZE = BLOCK_SIZE
        ctx.num_warps = num_warps
        ctx.eps = eps
        return out

    @staticmethod
    def backward(ctx, dy):
        x, w, b, m, v = ctx.saved_tensors
        # Heuristic for the amount of parallel-reduction buffers for DW/DB:
        # fewer columns -> more independent buffers.
        N = w.shape[0]
        if N <= 1024:
            GROUP_SIZE_M = 256
        elif N <= 4096:
            GROUP_SIZE_M = 128
        elif N <= 8192:
            GROUP_SIZE_M = 96
        else:
            GROUP_SIZE_M = 64
        # One lock word plus one "initialized" counter per buffer (hence 2x).
        locks = torch.zeros(2 * GROUP_SIZE_M, dtype=torch.int32, device='cuda')
        _dw = torch.empty((GROUP_SIZE_M, w.shape[0]), dtype=x.dtype, device=w.device)
        _db = torch.empty((GROUP_SIZE_M, w.shape[0]), dtype=x.dtype, device=w.device)
        dw = torch.empty((w.shape[0],), dtype=w.dtype, device=w.device)
        db = torch.empty((w.shape[0],), dtype=w.dtype, device=w.device)
        dx = torch.empty_like(dy)
        x_2d = x.reshape(-1, x.shape[-1])
        M, N = x_2d.shape
        # Stage 1: dx plus partial dw/db sums, reusing forward-pass heuristics.
        _layer_norm_bwd_dx_fused[(M,)](
            dx, dy, _dw, _db, x, w, b, m, v, locks,
            x_2d.stride(0), N, ctx.eps,
            BLOCK_SIZE_N=ctx.BLOCK_SIZE,
            GROUP_SIZE_M=GROUP_SIZE_M,
            num_warps=ctx.num_warps,
        )

        def grid(meta):
            return [triton.cdiv(N, meta['BLOCK_SIZE_N'])]

        # Stage 2: fold the partial buffers into the final dw/db.
        _layer_norm_bwd_dwdb[grid](
            _dw, _db, dw, db, GROUP_SIZE_M, N,
            BLOCK_SIZE_M=32, BLOCK_SIZE_N=128,
        )
        # One gradient slot per forward argument (None for non-tensor args).
        return dx, None, dw, db, None


layer_norm = LayerNorm.apply


def test_layer_norm(M, N, dtype, eps=1e-5, device='cuda'):
    """Compare the Triton forward/backward against torch.nn.functional."""
    x_shape = (M, N)
    w_shape = (x_shape[-1], )
    weight = torch.rand(w_shape, dtype=dtype, device='cuda', requires_grad=True)
    bias = torch.rand(w_shape, dtype=dtype, device='cuda', requires_grad=True)
    x = -2.3 + 0.5 * torch.randn(x_shape, dtype=dtype, device='cuda')
    dy = .1 * torch.randn_like(x)
    x.requires_grad_(True)
    # Forward with both implementations.
    y_tri = layer_norm(x, w_shape, weight, bias, eps)
    y_ref = torch.nn.functional.layer_norm(x, w_shape, weight, bias, eps).to(dtype)
    # Triton backward; then clear grads before the reference run.
    y_tri.backward(dy, retain_graph=True)
    dx_tri, dw_tri, db_tri = [t.grad.clone() for t in (x, weight, bias)]
    x.grad, weight.grad, bias.grad = None, None, None
    # Reference backward.
    y_ref.backward(dy, retain_graph=True)
    dx_ref, dw_ref, db_ref = [t.grad.clone() for t in (x, weight, bias)]
    # Compare outputs and all gradients.
    assert torch.allclose(y_tri, y_ref, atol=1e-2, rtol=0)
    assert torch.allclose(dx_tri, dx_ref, atol=1e-2, rtol=0)
    assert torch.allclose(db_tri, db_ref, atol=1e-2, rtol=0)
    assert torch.allclose(dw_tri, dw_ref, atol=1e-2, rtol=0)


@triton.testing.perf_report(
    triton.testing.Benchmark(
        x_names=['N'],
        x_vals=[512 * i for i in range(2, 32)],
        line_arg='provider',
        line_vals=['triton', 'torch'] + (['apex'] if HAS_APEX else []),
        line_names=['Triton', 'Torch'] + (['Apex'] if HAS_APEX else []),
        styles=[('blue', '-'), ('green', '-'), ('orange', '-')],
        ylabel='GB/s',
        plot_name='layer-norm-backward',
        args={'M': 4096, 'dtype': torch.float16, 'mode': 'backward'},
    )
)
def bench_layer_norm(M, N, dtype, provider, mode='backward', eps=1e-5, device='cuda'):
    """Measure achieved memory bandwidth of one layer-norm provider."""
    x_shape = (M, N)
    w_shape = (x_shape[-1], )
    weight = torch.rand(w_shape, dtype=dtype, device='cuda', requires_grad=True)
    bias = torch.rand(w_shape, dtype=dtype, device='cuda', requires_grad=True)
    x = -2.3 + 0.5 * torch.randn(x_shape, dtype=dtype, device='cuda')
    dy = .1 * torch.randn_like(x)
    x.requires_grad_(True)
    quantiles = [0.5, 0.2, 0.8]
    # Pick the forward callable for the requested provider.
    if provider == 'triton':
        y_fwd = lambda: layer_norm(x, w_shape, weight, bias, eps)
    elif provider == 'torch':
        y_fwd = lambda: torch.nn.functional.layer_norm(x, w_shape, weight, bias, eps)
    elif provider == 'apex':
        apex_layer_norm = apex.normalization.FusedLayerNorm(w_shape).to(x.device).to(x.dtype)
        y_fwd = lambda: apex_layer_norm(x)
    if mode == 'forward':
        # Traffic model: read x once, write y once.
        gbps = lambda ms: 2 * x.numel() * x.element_size() / ms * 1e-6
        ms, min_ms, max_ms = triton.testing.do_bench(y_fwd, quantiles=quantiles, rep=500)
    elif mode == 'backward':
        # Traffic model: read x and dy, write dx.
        gbps = lambda ms: 3 * x.numel() * x.element_size() / ms * 1e-6
        y = y_fwd()
        ms, min_ms, max_ms = triton.testing.do_bench(
            lambda: y.backward(dy, retain_graph=True),
            quantiles=quantiles, grad_to_none=[x], rep=500,
        )
    return gbps(ms), gbps(max_ms), gbps(min_ms)


test_layer_norm(1151, 8192, torch.float16)
bench_layer_norm.run(save_path='.', print_data=True)

# %%
# References
# ----------
#
# .. [BA2016] Jimmy Lei Ba and Jamie Ryan Kiros and Geoffrey E. Hinton, "Layer Normalization", Arxiv 2016
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,417
quantapix/qnarre
refs/heads/main
/qnarre/core/utils.py
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Small tensor/text utilities shared across qnarre models."""

import numpy as np
import os
import torch

from itertools import chain
from transformers.activations import ACT2FN


def activation(k, v=None):
    """Resolve an activation: look up ``k`` in ACT2FN when it is a string,
    otherwise return ``v`` if given, else ``k`` itself."""
    return ACT2FN[k] if isinstance(k, str) else (v or k)


def view_2D(*xs):
    """Flatten each tensor to 2-D ``(-1, last_dim)``; ``None`` entries pass through."""
    return [x.view(-1, x.size(-1)) if x is not None else None for x in xs]


def view_3D(*xs):
    """Flatten each tensor to 3-D ``(-1, dim-2, dim-1)``; ``None`` entries pass through."""
    return [x.view(-1, x.size(-2), x.size(-1)) if x is not None else None for x in xs]


def get_list(xs):
    """Return the sorted union of the elements of the iterables in ``xs``."""
    ys = set()
    for x in xs:
        ys = ys | set(x)
    ys = list(ys)
    ys.sort()
    return ys


def group_texts(size, xs):
    """Concatenate each column of ``xs`` and re-chunk into blocks of ``size``.

    ``xs`` maps column names to lists of sequences (must include "input_ids").
    Trailing tokens that do not fill a whole block are dropped; "labels" is a
    copy of the chunked "input_ids".
    """
    ys = {k: list(chain(*xs[k])) for k in xs.keys()}
    n = len(ys[list(xs.keys())[0]])
    if n >= size:
        # drop the remainder so every chunk has exactly `size` tokens
        n = (n // size) * size
    ys = {k: [x[i : i + size] for i in range(0, n, size)] for k, x in ys.items()}
    ys["labels"] = ys["input_ids"].copy()
    return ys


def init_array(xs, dataset, lim):
    """Stack the batches in ``xs`` into a ``(len(dataset), lim)`` array.

    Cells not covered by any batch keep the fill value -100; the final batch
    is truncated if it would overrun ``len(dataset)`` rows.
    """
    i = 0
    # NOTE: float32 chosen over float64 to halve the footprint
    ys = np.full((len(dataset), lim), -100, dtype=np.float32)
    for x in xs:
        batch = x.shape[0]
        cols = x.shape[1]
        if i + batch < len(dataset):
            ys[i : i + batch, :cols] = x
        else:
            ys[i:, :cols] = x[: len(dataset) - i]
        i += batch
    return ys


def big_neg(dtype=None):
    """Return a large negative value usable for additive attention masking.

    For ``dtype == "float16"`` this is the most negative representable half
    float; otherwise -1e9.
    """
    f = dtype
    # BUG FIX: torch.float16 is a dtype object with no `.min` attribute; the
    # numeric limit must be obtained through torch.finfo (same idiom as in
    # causal_mask/expand_mask below).
    return torch.finfo(torch.float16).min if f == "float16" else -1e9


class Dictionary:
    """Bidirectional word <-> index mapping built incrementally."""

    def __init__(self):
        # word2idx: str -> int; idx2word: list where position == index
        self.word2idx = {}
        self.idx2word = []

    def add_word(self, x):
        """Register ``x`` if unseen and return its index."""
        if x not in self.word2idx:
            self.idx2word.append(x)
            self.word2idx[x] = len(self.idx2word) - 1
        return self.word2idx[x]

    def __len__(self):
        return len(self.idx2word)


class Corpus:
    """Word-level corpus: tokenizes train/eval/test splits under ``path``."""

    def __init__(self, path):
        self.dictionary = Dictionary()
        self.train = self.tokenize(os.path.join(path, "train.txt"))
        self.eval = self.tokenize(os.path.join(path, "eval.txt"))
        self.test = self.tokenize(os.path.join(path, "test.txt"))

    def tokenize(self, path):
        """Return a 1-D int64 tensor of word ids for the file at ``path``.

        First pass fills the dictionary; second pass maps every word (plus an
        appended "<eos>" per line) to its id.
        """
        assert os.path.exists(path)
        with open(path, "r", encoding="utf8") as f:
            for line in f:
                words = line.split() + ["<eos>"]
                for word in words:
                    self.dictionary.add_word(word)
        with open(path, "r", encoding="utf8") as f:
            idss = []
            for line in f:
                words = line.split() + ["<eos>"]
                ids = []
                for word in words:
                    ids.append(self.dictionary.word2idx[word])
                idss.append(torch.tensor(ids).type(torch.int64))
            ids = torch.cat(idss)
        return ids


def shift_right(x, PAD, dec_START):
    """Shift token ids one position right, writing ``dec_START`` at column 0
    and replacing any -100 sentinels with ``PAD``.  Assumes ``x`` is 2-D
    (batch, seq) — the [:, 1:] indexing requires it.
    """
    y = x.new_zeros(x.shape)
    y[:, 1:] = x[:, :-1].clone()
    y[:, 0] = dec_START
    assert PAD is not None
    y.masked_fill_(y == -100, PAD)
    return y


def shift_right2(x, PAD):
    """Variant of shift_right that derives the start token per row: the last
    non-PAD token of each row (presumably EOS — confirm against callers) is
    moved to column 0 before shifting."""
    y = x.clone()
    assert PAD is not None
    y.masked_fill_(y == -100, PAD)
    # index of the last non-PAD token in each row
    eos = (y.ne(PAD).sum(dim=1) - 1).unsqueeze(-1)
    dec_START = y.gather(1, eos).squeeze()
    y[:, 1:] = y[:, :-1].clone()
    y[:, 0] = dec_START
    return y


def causal_mask(shape, dtype, device, c_len=0):  # qpx add device
    """Build an additive causal attention mask of shape (b, 1, n, n + c_len).

    Positions at or before the query index are 0 (visible); later positions
    hold the most negative value of ``dtype``.  ``c_len`` prepends that many
    always-visible (zero) cache columns.
    """
    b, n = shape
    y = torch.full((n, n), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
    cond = torch.arange(y.size(-1), device=device)
    # zero where column < row + 1, i.e. the lower triangle incl. the diagonal
    y.masked_fill_(cond < (cond + 1).view(y.size(-1), 1), 0)
    y = y.to(dtype)
    if c_len > 0:
        y = torch.cat([torch.zeros(n, c_len, dtype=dtype, device=device), y], dim=-1)
    return y[None, None, :, :].expand(b, 1, n, n + c_len)


def expand_mask(x, dtype, len=None):
    """Expand a (b, n) 0/1 attention mask to an additive (b, 1, len, n) mask:
    kept positions (1) become 0, masked positions (0) become dtype-min.

    ``len`` (kept for interface compatibility despite shadowing the builtin)
    is the query length; defaults to n.
    """
    b, n = x.size()
    len = len if len is not None else n
    y = 1.0 - x[:, None, None, :].expand(b, 1, len, n).to(dtype)
    return y.masked_fill(y.to(torch.bool), torch.finfo(dtype).min)  # * y
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,418
quantapix/qnarre
refs/heads/main
/qnarre/base/named.py
# Copyright 2019 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= from .. import to_class from ..rectify import QNERR class Tagged: _factor = _weight = 1 _bias = 0 def __init__(self, *, tag, factor=None, bias=None, weight=None, **kw): assert tag == self.tag for k in ('agent', 'author', 'authority', 'genre', 'preset', 'place'): kw.pop(k, None) if kw: print(kw) super().__init__(**kw) if factor is not None: self._factor = factor if bias is not None: self._bias = bias if weight is not None: self._weight = weight @classmethod def to_tag(cls): return cls.__name__.lower() @property def tag(self): return type(self).to_tag() @property def factor(self): return self._factor @property def bias(self): return self._bias @property def weight(self): return self.factor * self._weight + self.bias def partial(self, *parts): ws = [] for p in parts: if isinstance(p, tuple): ws.extend(p) else: ws.append(p) return self.factor * (sum(ws) if self._weight == 1 else len(ws)) class Saved: root = None def __init__(self, *, root=None, path=None, text=None, **kw): super().__init__(**kw) if root: self.root = root if text is None and path: text = '' p = self.root / path if p.exists(): text = p.read_text(encoding='ascii', errors=QNERR) if text is not None: self.from_text(text, root=root, **kw) def __str__(self): return '{}{}'.format(self.name, self.suff) @property def suff(self): return '.' 
+ self.tag @property def path(self): return self.root / str(self) def save(self, **kw): t = self.to_text(**kw) self.path.write_text(t, encoding='ascii', errors=QNERR) class Named(Tagged): sequence = None _by_name = {} _by_tag = None _seq = 0 @classmethod def next_seq(cls): cls._seq += 1 return cls._seq @classmethod def next_name(cls): return '{:0>6d}'.format(len(cls._by_name)) @classmethod def create(cls, *, name, tag=None, **kw): n = name if ':' in name else (':' + name) t, n = n.split(':') t = tag if tag else t t = t if t else cls.to_tag() n = cls.next_name() if n == 'fudge' else n k = t + ':' + n try: v = cls._by_name[k] if len(kw): v.__init__(tag=t, name=n, **kw) except KeyError: c = cls if t == cls.to_tag() else to_class(t) if not len(kw): kw['empty'] = True cls._by_name[k] = v = c(tag=t, name=n, **kw) cls._by_tag = None return v @classmethod def by_tag(cls, tag): if cls._by_tag is None: cls._by_tag = bt = {} for n in cls._by_name.values(): bt.setdefault(n.tag, []).append(n) return cls._by_tag.get(tag, ()) def __init__(self, *, name, empty=False, **kw): super().__init__(**kw) self.name = name if not empty and self.sequence is None: self.sequence = self.next_seq() def __str__(self): return "'{}:{}'".format(self.tag, self.name) @property def fields(self): return {'Type': self.tag, 'Name': self.name} def also_as(self, tag): k = tag + ':' + self.name assert k not in self._by_name self._by_name[k] = self class Preset(Saved, Named): props = {} def from_text(self, txt, **_): self.props = eval(txt or '{}')
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,419
quantapix/qnarre
refs/heads/main
/tools/triton/python/triton/language/semantic.py
from __future__ import annotations # remove after python 3.11 from functools import wraps from typing import List, Optional, Sequence, Tuple, TypeVar from . import core as tl from triton._C.libtriton.triton import ir T = TypeVar('T') # Create custom exception that prints message "hello" class IncompatibleTypeErrorImpl(Exception): def __init__(self, type_a, type_b): self.type_a = type_a self.type_b = type_b self.message = "invalid operands of type " + self.type_a.__repr__() + " and " + self.type_b.__repr__() super(IncompatibleTypeErrorImpl, self).__init__(self.message) # ===----------------------------------------------------------------------===## # Programming Model # ===----------------------------------------------------------------------===## def program_id(axis: int, builder: ir.builder) -> tl.tensor: return tl.tensor(builder.create_get_program_id(axis), tl.int32) def num_programs(axis: int, builder: ir.builder) -> tl.tensor: return tl.tensor(builder.create_get_num_programs(axis), tl.int32) # ===----------------------------------------------------------------------===// # Implicit Casting Utilities # ===----------------------------------------------------------------------===// def integer_promote_impl(a_ty: tl.dtype, b_ty: tl.dtype) -> tl.dtype: a_rank = a_ty.int_bitwidth b_rank = b_ty.int_bitwidth a_sn = a_ty.int_signedness b_sn = b_ty.int_signedness # Rules for signedness taken from "Usual arithmetic conversions" on # https://en.cppreference.com/w/c/language/conversion. 
if a_sn == b_sn: return a_ty if a_rank > b_rank else b_ty elif a_sn == tl.dtype.SIGNEDNESS.UNSIGNED: return a_ty if a_rank >= b_rank else b_ty elif b_sn == tl.dtype.SIGNEDNESS.UNSIGNED: return b_ty if b_rank >= a_rank else a_ty assert False def computation_type_impl(a_ty: tl.dtype, b_ty: tl.dtype, div_or_mod: bool) -> tl.dtype: # 1) if one operand is double, the other is implicitly # converted to double if a_ty.is_fp64() or b_ty.is_fp64(): return tl.float64 # 2) if one operand is float, the other is implicitly # converted to float if a_ty.is_fp32() or b_ty.is_fp32(): return tl.float32 # 3 ) if one operand is half, the other is implicitly converted to half # unless we're doing / or %, which do not exist natively in PTX for fp16. # Supported PTX op: add, sub, mul, fma, neg, abs, min, max, tanh, ex2, setp if a_ty.is_fp16() or b_ty.is_fp16(): if div_or_mod: return tl.float32 else: return tl.float16 # 4) return bf16 only if both operands are of bf16 if a_ty.is_bf16() or b_ty.is_bf16(): if div_or_mod: return tl.float32 if a_ty.is_bf16() and b_ty.is_bf16(): return tl.bfloat16 return tl.float32 if not a_ty.is_int() or not b_ty.is_int(): assert False # 5 ) both operands are integer and undergo # integer promotion if div_or_mod and a_ty.int_signedness != b_ty.int_signedness: raise ValueError("Cannot use /, #, or % with " + a_ty.__repr__() + " and " + b_ty.__repr__() + " because they have different signedness;" "this is unlikely to result in a useful answer. 
Cast them to the same signedness.") return integer_promote_impl(a_ty, b_ty) # ===----------------------------------------------------------------------===// # Binary Operators # ===----------------------------------------------------------------------===// def check_ptr_type_impl(type_a: tl.dtype, type_b: tl.dtype, allow_ptr_a: bool) -> None: if type_a.is_ptr(): if not allow_ptr_a: raise IncompatibleTypeErrorImpl(type_a, type_b) # T* + U* with T != U if type_b.is_ptr() and (type_a != type_b): raise IncompatibleTypeErrorImpl(type_a, type_b) # T* + float if type_b.is_floating(): raise IncompatibleTypeErrorImpl(type_a, type_b) def binary_op_type_checking_impl(lhs: tl.tensor, rhs: tl.tensor, builder: ir.builder, allow_lhs_ptr=False, allow_rhs_ptr=False, arithmetic_check=True, div_or_mod=False ) -> Tuple[tl.tensor, tl.tensor]: # implicit broadcasting lhs, rhs = broadcast_impl_value(lhs, rhs, builder) # implicit typecasting lhs_sca_ty = lhs.type.scalar rhs_sca_ty = rhs.type.scalar check_ptr_type_impl(lhs_sca_ty, rhs_sca_ty, allow_lhs_ptr) check_ptr_type_impl(rhs_sca_ty, lhs_sca_ty, allow_rhs_ptr) if arithmetic_check and not lhs_sca_ty.is_ptr() and not rhs_sca_ty.is_ptr(): ret_sca_ty = computation_type_impl(lhs_sca_ty, rhs_sca_ty, div_or_mod) lhs = cast(lhs, ret_sca_ty, builder) rhs = cast(rhs, ret_sca_ty, builder) return lhs, rhs def add(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = binary_op_type_checking_impl(input, other, builder, True, True) input_scalar_ty = input.type.scalar other_scalar_ty = other.type.scalar # offset + ptr # ptr + offset if other_scalar_ty.is_ptr() and not input_scalar_ty.is_ptr(): input, other = other, input if input_scalar_ty.is_ptr(): return tl.tensor(builder.create_addptr(input.handle, other.handle), input.type) # float + float elif input_scalar_ty.is_floating(): return tl.tensor(builder.create_fadd(input.handle, other.handle), input.type) # int + int elif input_scalar_ty.is_int(): return 
tl.tensor(builder.create_add(input.handle, other.handle), input.type) assert False def sub(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = binary_op_type_checking_impl(input, other, builder, True, False) scalar_ty = input.type.scalar # ptr - offset if scalar_ty.is_ptr(): return tl.tensor(builder.create_addptr(input.handle, minus(other, builder).handle), input.type) # float - float if scalar_ty.is_floating(): return tl.tensor(builder.create_fsub(input.handle, other.handle), input.type) # int - int elif scalar_ty.is_int(): return tl.tensor(builder.create_sub(input.handle, other.handle), input.type) assert False def mul(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = binary_op_type_checking_impl(input, other, builder) scalar_ty = input.type.scalar # float * float if scalar_ty.is_floating(): return tl.tensor(builder.create_fmul(input.handle, other.handle), input.type) # * int elif scalar_ty.is_int(): return tl.tensor(builder.create_mul(input.handle, other.handle), input.type) assert False def truediv(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = binary_op_type_checking_impl(input, other, builder, False, False, True, True) input_scalar_ty = input.type.scalar other_scalar_ty = other.type.scalar # float / int if input_scalar_ty.is_floating() and other_scalar_ty.is_int(): other = cast(other, input_scalar_ty, builder) # int / float elif input_scalar_ty.is_int() and other_scalar_ty.is_floating(): input = cast(input, other_scalar_ty, builder) # int / int (cast to tl.float32) elif input_scalar_ty.is_int() and other_scalar_ty.is_int(): input = cast(input, tl.float32, builder) other = cast(other, tl.float32, builder) # float / float (cast to highest exponent type) elif input_scalar_ty.is_floating() and other_scalar_ty.is_floating(): if input_scalar_ty.fp_mantissa_width > other_scalar_ty.fp_mantissa_width: other = cast(other, input_scalar_ty, builder) else: input = 
cast(input, other_scalar_ty, builder) # unreachable else: assert False return tl.tensor(builder.create_fdiv(input.handle, other.handle), input.type) def floordiv(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = binary_op_type_checking_impl(input, other, builder, False, False, True, True) input_scalar_ty = input.type.scalar other_scalar_ty = other.type.scalar if input_scalar_ty.is_int() and other_scalar_ty.is_int(): ret_ty = integer_promote_impl(input_scalar_ty, other_scalar_ty) input = cast(input, ret_ty, builder) other = cast(other, ret_ty, builder) if ret_ty.is_int_signed(): return tl.tensor(builder.create_sdiv(input.handle, other.handle), input.type) else: return tl.tensor(builder.create_udiv(input.handle, other.handle), input.type) assert False def fdiv(input: tl.tensor, other: tl.tensor, ieee_rounding: bool, builder: ir.builder) -> tl.tensor: input_scalar_ty = input.type.scalar other_scalar_ty = other.type.scalar if not input_scalar_ty.is_floating() or not other_scalar_ty.is_floating(): raise ValueError("both operands of fdiv must have floating scalar type") input, other = binary_op_type_checking_impl(input, other, builder, False, False, False, True) ret = builder.create_fdiv(input.handle, other.handle) return tl.tensor(ret, input.type) def mod(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = binary_op_type_checking_impl(input, other, builder, False, False, True, True) scalar_ty = input.type.scalar other_scalar_ty = other.type.scalar # float % float if scalar_ty.is_floating(): # input - input.div(other, rounding_mode="floor") * other ret = sub(input, mul(floor(fdiv(input, other, False, builder), builder), other, builder), builder) return ret # % int elif scalar_ty.is_int(): if scalar_ty.int_signedness != other_scalar_ty.int_signedness: raise ValueError("Cannot mod " + scalar_ty.__repr__() + " by " + other_scalar_ty.__repr__() + " " "because they have different signedness;" "this is 
unlikely to result in a useful answer. Cast them to the same signedness.") if scalar_ty.is_int_signed(): return tl.tensor(builder.create_srem(input.handle, other.handle), input.type) else: return tl.tensor(builder.create_urem(input.handle, other.handle), input.type) assert False ############## # bitwise ops ############## def bitwise_op_type_checking_impl(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> Tuple[tl.tensor, tl.tensor]: input, other = binary_op_type_checking_impl(input, other, builder, False, False, False) input_sca_ty = input.type.scalar other_sca_ty = other.type.scalar if not input_sca_ty.is_int() or not other_sca_ty.is_int(): raise IncompatibleTypeErrorImpl(input_sca_ty, other_sca_ty) ret_sca_ty = integer_promote_impl(input_sca_ty, other_sca_ty) if ret_sca_ty != input_sca_ty: input = cast(input, ret_sca_ty, builder) if ret_sca_ty != other_sca_ty: other = cast(other, ret_sca_ty, builder) return input, other def and_(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = bitwise_op_type_checking_impl(input, other, builder) return tl.tensor(builder.create_and(input.handle, other.handle), input.type) def or_(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = bitwise_op_type_checking_impl(input, other, builder) return tl.tensor(builder.create_or(input.handle, other.handle), input.type) def xor_(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = bitwise_op_type_checking_impl(input, other, builder) return tl.tensor(builder.create_xor(input.handle, other.handle), input.type) def logical_and(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: if not input.type.is_int1(): input = bitcast(input, tl.dtype("int1"), builder) if not other.type.is_int1(): other = bitcast(other, tl.dtype("int1"), builder) return and_(input, other, builder) def logical_or(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: if not 
input.type.is_int1(): input = bitcast(input, tl.dtype("int1"), builder) if not other.type.is_int1(): other = bitcast(other, tl.dtype("int1"), builder) return or_(input, other, builder) def not_(input: tl.tensor, builder: ir.builder): if not input.type.is_int1(): input = bitcast(input, tl.dtype("int1"), builder) return invert(input, builder) def lshr(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = bitwise_op_type_checking_impl(input, other, builder) return tl.tensor(builder.create_lshr(input.handle, other.handle), input.type) def ashr(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = bitwise_op_type_checking_impl(input, other, builder) return tl.tensor(builder.create_ashr(input.handle, other.handle), input.type) def shl(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: input, other = bitwise_op_type_checking_impl(input, other, builder) return tl.tensor(builder.create_shl(input.handle, other.handle), input.type) # ===----------------------------------------------------------------------===// # Unary Operators # ===----------------------------------------------------------------------===// def plus(input: tl.tensor) -> tl.tensor: return input def minus(input: tl.tensor, builder: ir.builder) -> tl.tensor: input_sca_ty = input.type.scalar if input_sca_ty.is_ptr(): raise ValueError("wrong type argument to unary minus (" + input_sca_ty.__repr__() + ")") _0 = tl.tensor(builder.get_null_value(input_sca_ty.to_ir(builder)), input_sca_ty) return sub(_0, input, builder) def invert(input: tl.tensor, builder: tl.tensor) -> tl.tensor: input_sca_ty = input.type.scalar if input_sca_ty.is_ptr() or input_sca_ty.is_floating(): raise ValueError("wrong type argument to unary invert (" + input_sca_ty.__repr__() + ")") _1 = tl.tensor(builder.get_all_ones_value(input_sca_ty.to_ir(builder)), input_sca_ty) return xor_(input, _1, builder) # 
# ===----------------------------------------------------------------------===//
# Comparison Operators
# ===----------------------------------------------------------------------===//


def _bool_like(v: tl.tensor) -> tl.dtype:
    # Boolean dtype with the same block shape as `v` (plain int1 for scalars).
    # NOTE: return annotation fixed from tl.block_type — the scalar branch
    # returns tl.int1, which is not a block type.
    if not v.type.is_block():
        return tl.int1
    shape = v.type.shape
    return tl.block_type(tl.int1, shape)


def greater_than(input: tl.tensor,
                 other: tl.tensor,
                 builder: ir.builder) -> tl.tensor:
    """Elementwise >, returning a boolean tensor."""
    input, other = binary_op_type_checking_impl(input, other, builder)
    scalar_ty = input.type.scalar
    # float > float
    if scalar_ty.is_floating():
        return tl.tensor(builder.create_fcmpOGT(input.handle, other.handle), _bool_like(input))
    # > int
    elif scalar_ty.is_int():
        if scalar_ty.is_int_signed():
            return tl.tensor(builder.create_icmpSGT(input.handle, other.handle), _bool_like(input))
        else:
            return tl.tensor(builder.create_icmpUGT(input.handle, other.handle), _bool_like(input))
    assert False


def greater_equal(input: tl.tensor,
                  other: tl.tensor,
                  builder: ir.builder) -> tl.tensor:
    """Elementwise >=, returning a boolean tensor."""
    input, other = binary_op_type_checking_impl(input, other, builder)
    scalar_ty = input.type.scalar
    # float >= float
    if scalar_ty.is_floating():
        return tl.tensor(builder.create_fcmpOGE(input.handle, other.handle), _bool_like(input))
    # >= int
    elif scalar_ty.is_int():
        if scalar_ty.is_int_signed():
            return tl.tensor(builder.create_icmpSGE(input.handle, other.handle), _bool_like(input))
        else:
            return tl.tensor(builder.create_icmpUGE(input.handle, other.handle), _bool_like(input))
    assert False


def less_than(input: tl.tensor,
              other: tl.tensor,
              builder: ir.builder) -> tl.tensor:
    """Elementwise <, returning a boolean tensor."""
    input, other = binary_op_type_checking_impl(input, other, builder)
    scalar_ty = input.type.scalar
    # float < float
    if scalar_ty.is_floating():
        return tl.tensor(builder.create_fcmpOLT(input.handle, other.handle), _bool_like(input))
    # < int
    elif scalar_ty.is_int():
        if scalar_ty.is_int_signed():
            return tl.tensor(builder.create_icmpSLT(input.handle, other.handle), _bool_like(input))
        else:
            return tl.tensor(builder.create_icmpULT(input.handle, other.handle), _bool_like(input))
    assert False


def less_equal(input: tl.tensor,
               other: tl.tensor,
               builder: ir.builder) -> tl.tensor:
    """Elementwise <=, returning a boolean tensor."""
    input, other = binary_op_type_checking_impl(input, other, builder)
    scalar_ty = input.type.scalar
    # float <= float
    if scalar_ty.is_floating():
        return tl.tensor(builder.create_fcmpOLE(input.handle, other.handle), _bool_like(input))
    # <= int
    elif scalar_ty.is_int():
        if scalar_ty.is_int_signed():
            return tl.tensor(builder.create_icmpSLE(input.handle, other.handle), _bool_like(input))
        else:
            return tl.tensor(builder.create_icmpULE(input.handle, other.handle), _bool_like(input))
    assert False


def equal(input: tl.tensor,
          other: tl.tensor,
          builder: ir.builder) -> tl.tensor:
    """Elementwise ==, returning a boolean tensor."""
    input, other = binary_op_type_checking_impl(input, other, builder)
    scalar_ty = input.type.scalar
    # float == float
    if scalar_ty.is_floating():
        return tl.tensor(builder.create_fcmpOEQ(input.handle, other.handle), _bool_like(input))
    # == int
    elif scalar_ty.is_int():
        return tl.tensor(builder.create_icmpEQ(input.handle, other.handle), _bool_like(input))
    assert False


def not_equal(input: tl.tensor,
              other: tl.tensor,
              builder: ir.builder) -> tl.tensor:
    """Elementwise !=, returning a boolean tensor."""
    input, other = binary_op_type_checking_impl(input, other, builder)
    scalar_ty = input.type.scalar
    # float != float
    if scalar_ty.is_floating():
        return tl.tensor(builder.create_fcmpUNE(input.handle, other.handle), _bool_like(input))
    # != int
    elif scalar_ty.is_int():
        return tl.tensor(builder.create_icmpNE(input.handle, other.handle), _bool_like(input))
    assert False


# ===----------------------------------------------------------------------===//
# Block Creation
# ===----------------------------------------------------------------------===//


def arange(start: int, end: int, builder: ir.builder) -> tl.tensor:
    """Return an int32 tensor holding [start, end); both bounds must fit in int32."""
    if not isinstance(start, int) or not isinstance(end, int):
        raise ValueError("arange's arguments must be of type tl.constexpr")
    is_start_int64 = bool(start >> 32)
    is_end_int64 = bool(end >> 32)
    if is_start_int64 or is_end_int64:
        raise ValueError("arange must fit in int32")
    if end <= start:
        raise ValueError("arange's end argument must be greater than the start argument")
    shape = [end - start]
    ret_ty = tl.block_type(tl.int32, shape)
    return tl.tensor(builder.create_make_range(start, end), ret_ty)


def full(shape: List[int], value, dtype: tl.dtype, builder: ir.builder) -> tl.tensor:
    """Return a tensor of `shape` filled with `value`, of dtype `dtype`.

    `value` may be a size-1 tl.tensor or a Python scalar; scalars require
    an explicit `dtype`.
    """
    if isinstance(value, tl.tensor):
        assert value.numel.value == 1, "only accepts size-1 tensor"
        value = cast(value, dtype, builder)
        ret_ty = tl.block_type(value.dtype, shape)
        return tl.tensor(builder.create_splat(value.handle, shape), ret_ty)
    else:
        # scalar
        # BUG FIX: this check previously ran only AFTER `dtype` had been
        # dereferenced (dtype.to_ir / dtype.name), so a missing dtype raised
        # AttributeError instead of the intended ValueError.
        if dtype is None:
            raise ValueError("dtype must be specified when value is not a tensor")
        if value == 0:
            value = builder.get_null_value(dtype.to_ir(builder))
        else:
            get_value_fn = getattr(builder, f"get_{dtype.name}")
            value = get_value_fn(value)
        ret_ty = tl.block_type(dtype, shape)
        return tl.tensor(builder.create_splat(value, shape), ret_ty)


# ===----------------------------------------------------------------------===//
# Shape Manipulation
# ===----------------------------------------------------------------------===//


def view(input: tl.tensor,
         dst_shape: List[int],
         builder: ir.builder) -> tl.tensor:
    """Reinterpret `input` with shape `dst_shape`; total element count must match."""
    # TODO: disable when TritonToTritonGPU handles views properly
    # assert len(input.shape) == len(dst_shape)
    numel = 1
    for s in dst_shape:
        numel *= s
    if input.type.numel != numel:
        raise ValueError("cannot view block of different shape")
    ret_ty = tl.block_type(input.type.scalar, dst_shape)
    return tl.tensor(builder.create_view(input.handle, dst_shape), ret_ty)


def reshape(input: tl.tensor,
            dst_shape: List[int],
            builder: ir.builder) -> tl.tensor:
    # Deliberately unsupported: element order across a reshape is not yet defined.
    raise ValueError("`reshape` is not supported yet. Please use `view` instead if applicable. "
                     "Note that view may reorder elements in an implementation- and context- dependent way.")


def expand_dims(input: tl.tensor, axis: int, builder: ir.builder) -> tl.tensor:
    """Insert a singleton dimension at position `axis`."""
    dst_shape = list(input.type.shape)
    dst_shape.insert(axis, 1)
    ret_ty = tl.block_type(input.type.scalar, dst_shape)
    return tl.tensor(builder.create_expand_dims(input.handle, axis), ret_ty)


def cat(lhs: tl.tensor, rhs: tl.tensor, can_reorder: bool, builder: ir.builder) -> tl.tensor:
    """Concatenate two 1-D tensors; the result's element order is unspecified."""
    assert can_reorder, "current implementation of `cat` always may reorder elements"
    assert len(lhs.shape) == 1
    ret_type = tl.block_type(lhs.type.scalar, [lhs.shape[0] + rhs.shape[0]])
    return tl.tensor(builder.create_cat(lhs.handle, rhs.handle), ret_type)


def trans(input: tl.tensor, builder: ir.builder) -> tl.tensor:
    """Transpose a 2-D tensor."""
    if len(input.shape) != 2:
        raise ValueError("Only 2D tensors can be transposed")
    ret_type = tl.block_type(input.type.scalar, [input.shape[1], input.shape[0]])
    return tl.tensor(builder.create_trans(input.handle), ret_type)


def broadcast_impl_shape(input: tl.tensor,
                         shape: List[int],
                         builder: ir.builder) -> tl.tensor:
    """Broadcast `input` to exactly `shape` (splat scalars; expand size-1 dims)."""
    if not input.type.is_block():
        ret_ty = tl.block_type(input.type, shape)
        return tl.tensor(builder.create_splat(input.handle, shape), ret_ty)
    src_shape = input.type.get_block_shapes()
    if len(src_shape) != len(shape):
        raise ValueError(f"Cannot broadcast, rank mismatch: {src_shape}, {shape}")
    if shape == src_shape:
        return input
    for i, item in enumerate(src_shape):
        if shape[i] != item and item != 1:
            raise ValueError(f"Cannot broadcast, the expanded size of the tensor ({shape[i]})"
                             f" must match the existing size ({item}) at non-singleton dimension"
                             f" {i}: {src_shape}, {shape}")
    ret_ty = tl.block_type(input.type.scalar, shape)
    return tl.tensor(builder.create_broadcast(input.handle, shape), ret_ty)


def broadcast_impl_value(lhs: tl.tensor,
                         rhs: tl.tensor,
                         builder: ir.builder) -> Tuple[tl.tensor, tl.tensor]:
    """Broadcast two values to a common shape, numpy-style.

    BUG FIX: return annotation was `tl.tensor`; the function returns a pair.
    """
    lhs_ty = lhs.type
    rhs_ty = rhs.type
    # make_shape_compatible(block, scalar)
    if lhs_ty.is_block() and not rhs_ty.is_block():
        rhs_ty = tl.block_type(rhs_ty.scalar, lhs_ty.shape)
        rhs = tl.tensor(builder.create_splat(rhs.handle, lhs_ty.get_block_shapes()), rhs_ty)
    # make_shape_compatible(scalar, block)
    elif not lhs_ty.is_block() and rhs_ty.is_block():
        lhs_ty = tl.block_type(lhs_ty.scalar, rhs_ty.shape)
        lhs = tl.tensor(builder.create_splat(lhs.handle, rhs_ty.get_block_shapes()), lhs_ty)
    # make_shape_compatible(block, block)
    elif lhs_ty.is_block() and rhs_ty.is_block():
        lhs_shape = lhs_ty.get_block_shapes()
        rhs_shape = rhs_ty.get_block_shapes()
        if len(lhs_shape) < len(rhs_shape):
            # Add new axes to lhs
            for dim in range(len(lhs_shape), len(rhs_shape)):
                lhs = tl.tensor(builder.create_expand_dims(lhs.handle, 0),
                                tl.block_type(lhs_ty.scalar, [1] + lhs_shape))
                lhs_ty = lhs.type
                lhs_shape = lhs_ty.get_block_shapes()
        elif len(rhs_shape) < len(lhs_shape):
            # Add new axes to rhs
            for dim in range(len(rhs_shape), len(lhs_shape)):
                rhs = tl.tensor(builder.create_expand_dims(rhs.handle, 0),
                                tl.block_type(rhs_ty.scalar, [1] + rhs_shape))
                rhs_ty = rhs.type
                rhs_shape = rhs_ty.get_block_shapes()
        assert len(rhs_shape) == len(lhs_shape)
        ret_shape = []
        for i, left in enumerate(lhs_shape):
            right = rhs_shape[i]
            if left == 1:
                ret_shape.append(right)
            elif right == 1:
                ret_shape.append(left)
            elif left == right:
                ret_shape.append(left)
            else:
                raise ValueError("Cannot make_shape_compatible: incompatible dimensions "
                                 "at index " + str(i) + ": " + str(left) + " and " + str(right))
        if lhs_shape != ret_shape:
            ret_ty = tl.block_type(lhs_ty.scalar, ret_shape)
            lhs = tl.tensor(builder.create_broadcast(lhs.handle, ret_shape), ret_ty)
        if rhs_shape != ret_shape:
            ret_ty = tl.block_type(rhs_ty.scalar, ret_shape)
            rhs = tl.tensor(builder.create_broadcast(rhs.handle, ret_shape), ret_ty)
    # (scalar, scalar) => returns original blocks
    return lhs, rhs


#######
# cast
#######


def bitcast(input: tl.tensor,
            dst_ty: tl.dtype,
            builder: ir.builder) -> tl.tensor:
    """Reinterpret `input`'s bits as `dst_ty`; bit widths must match exactly."""
    src_ty = input.type
    if src_ty.is_block():
        dst_ty = tl.block_type(dst_ty.scalar, input.type.get_block_shapes())
    if src_ty == dst_ty:
        return input
    src_sca_ty = src_ty.scalar
    dst_sca_ty = dst_ty.scalar
    if src_sca_ty.is_ptr() or dst_sca_ty.is_ptr():
        # Pointer conversions go through the full cast logic instead.
        return cast(input, dst_ty, builder)
    # Bitcast
    src_bits = src_sca_ty.primitive_bitwidth
    dst_bits = dst_sca_ty.primitive_bitwidth
    if src_bits != dst_bits:
        raise ValueError("Cannot bitcast data-type of size " + str(src_bits) + " to "
                         "data-type of size " + str(dst_bits))
    return tl.tensor(builder.create_bitcast(input.handle, dst_ty.to_ir(builder)),
                     dst_ty)


def cast(input: tl.tensor,
         dst_ty: tl.dtype,
         builder: ir.builder) -> tl.tensor:
    """Value-converting cast between any two supported dtypes (fp/int/ptr)."""
    src_ty = input.type
    if isinstance(dst_ty, tl.constexpr):
        dst_ty = dst_ty.value
    if src_ty.is_block():
        dst_ty = tl.block_type(dst_ty.scalar, input.type.get_block_shapes())
    if src_ty == dst_ty:
        return input
    src_sca_ty = src_ty.scalar
    dst_sca_ty = dst_ty.scalar
    # Casting with customized floating types involved: fp8 <=> bf16, fp16, fp32, fp64
    if (src_sca_ty.is_fp8() and dst_sca_ty.is_floating()) or \
       (src_sca_ty.is_floating() and dst_sca_ty.is_fp8()):
        return tl.tensor(builder.create_fp_to_fp(input.handle, dst_ty.to_ir(builder)),
                         dst_ty)
    # bf16 <=> (not fp32)
    if (src_sca_ty.is_fp16() and not dst_sca_ty.is_fp32()) or \
       (src_sca_ty.is_bf16() and not dst_sca_ty.is_fp32()):
        # Round-trip through fp32 since hardware lacks a direct conversion.
        return cast(cast(input, tl.float32, builder), dst_sca_ty, builder)
    # Standard floating types' casting: truncation
    #   fp64 => fp32, fp16, bf16
    #   fp32 => fp16, bf16
    truncate_fp = src_sca_ty.is_floating() and \
        dst_sca_ty.is_floating() and \
        src_sca_ty.primitive_bitwidth > dst_sca_ty.primitive_bitwidth
    if truncate_fp:
        return tl.tensor(builder.create_fp_trunc(input.handle, dst_ty.to_ir(builder)),
                         dst_ty)
    # Standard floating types' casting: extension
    #   fp32 => fp64
    #   fp16 => fp32, fp64
    #   bf16 => fp32, fp64
    ext_fp = src_sca_ty.is_floating() and \
        dst_sca_ty.is_floating() and \
        src_sca_ty.primitive_bitwidth < dst_sca_ty.primitive_bitwidth
    if ext_fp:
        return tl.tensor(builder.create_fp_ext(input.handle, dst_ty.to_ir(builder)),
                         dst_ty)
    # Casting between integer types
    if src_sca_ty.is_int() and dst_sca_ty.is_int() and \
       (src_sca_ty.int_bitwidth != dst_sca_ty.int_bitwidth or src_sca_ty.int_signedness != dst_sca_ty.int_signedness):
        sign_extend = src_sca_ty.is_int_signed() and not src_sca_ty.is_bool()
        if dst_sca_ty.is_bool():
            # int -> bool is "!= 0", not a plain width cast.
            ty = input.dtype.to_ir(builder)
            _0 = tl.tensor(builder.get_null_value(ty), input.dtype)
            return not_equal(input, _0, builder)
        else:
            return tl.tensor(builder.create_int_cast(input.handle, dst_ty.to_ir(builder), sign_extend),
                             dst_ty)
    # Casting standard floating types to integer types
    if src_sca_ty.is_standard_floating() and dst_sca_ty.is_int():
        if dst_sca_ty.is_bool():
            ty = input.dtype.to_ir(builder)
            _0 = tl.tensor(builder.get_null_value(ty), input.dtype)
            return not_equal(input, _0, builder)
        elif dst_sca_ty.is_int_signed():
            return tl.tensor(builder.create_fp_to_si(input.handle, dst_ty.to_ir(builder)),
                             dst_ty)
        else:
            return tl.tensor(builder.create_fp_to_ui(input.handle, dst_ty.to_ir(builder)),
                             dst_ty)
    # Casting integer types to standard floating types
    if src_sca_ty.is_int() and dst_sca_ty.is_standard_floating():
        if src_sca_ty.is_bool() or not src_sca_ty.is_int_signed():
            return tl.tensor(builder.create_ui_to_fp(input.handle, dst_ty.to_ir(builder)),
                             dst_ty)
        else:
            return tl.tensor(builder.create_si_to_fp(input.handle, dst_ty.to_ir(builder)),
                             dst_ty)
    # Casting pointer types to integer types
    if src_sca_ty.is_ptr() and dst_sca_ty.is_int():
        bitwidth = dst_sca_ty.int_bitwidth
        if bitwidth == 64:
            return tl.tensor(builder.create_ptr_to_int(input.handle, dst_ty.to_ir(builder)),
                             dst_ty)
        if bitwidth == 1:
            # ptr -> bool is "ptr != NULL".
            return not_equal(cast(input, tl.int64, builder),
                             tl.tensor(builder.get_int64(0), tl.int64),
                             builder)
    # Casting integer types to pointer types
    if src_sca_ty.is_int() and dst_sca_ty.is_ptr():
        return tl.tensor(builder.create_int_to_ptr(input.handle, dst_ty.to_ir(builder)), dst_ty)
    # Casting pointer types to pointer types
    if src_sca_ty.is_ptr() and dst_sca_ty.is_ptr():
        return tl.tensor(builder.create_bitcast(input.handle, dst_ty.to_ir(builder)), dst_ty)
    assert False, f'cannot cast {input} to {dst_ty}'


# ===----------------------------------------------------------------------===//
# Memory Operators
# ===----------------------------------------------------------------------===//


def _str_to_cache_modifier(cache_modifier):
    """Map a cache-modifier string (".ca"/".cg") to the ir enum; empty -> NONE."""
    cache = ir.CACHE_MODIFIER.NONE  # default
    if cache_modifier:
        if cache_modifier == ".ca":
            cache = ir.CACHE_MODIFIER.CA
        elif cache_modifier == ".cg":
            cache = ir.CACHE_MODIFIER.CG
        else:
            raise ValueError(f"Cache modifier {cache_modifier} not supported")
    return cache


def _str_to_eviction_policy(eviction_policy):
    """Map an eviction-policy string to the ir enum; empty -> NORMAL."""
    eviction = ir.EVICTION_POLICY.NORMAL  # default
    if eviction_policy:
        if eviction_policy == "evict_last":
            eviction = ir.EVICTION_POLICY.EVICT_LAST
        elif eviction_policy == "evict_first":
            eviction = ir.EVICTION_POLICY.EVICT_FIRST
        else:
            raise ValueError(f"Eviction policy {eviction_policy} not supported")
    return eviction


def _str_to_padding_option(padding_option):
    """Map a padding-option string ("zero"/"nan") to the ir enum; empty -> None."""
    padding = None  # default
    if padding_option:
        if padding_option == "zero":
            padding = ir.PADDING_OPTION.PAD_ZERO
        elif padding_option == "nan":
            padding = ir.PADDING_OPTION.PAD_NAN
        else:
            raise ValueError(f"Padding option {padding_option} not supported")
    return padding


def _canonicalize_boundary_check(boundary_check, block_shape):
    """Validate and normalize `boundary_check` to a sorted list of unique dims."""
    if boundary_check:
        if not hasattr(boundary_check, "__iter__"):
            boundary_check = [boundary_check]
        # Unwrap tl.constexpr elements before validation.
        boundary_check = [elem.value if isinstance(elem, tl.constexpr) else elem for elem in boundary_check]
        for dim in boundary_check:
            assert isinstance(dim, int) and 0 <= dim < len(block_shape)
        assert len(boundary_check) > 0
        assert len(boundary_check) == len(set(boundary_check)), "Duplicate dimension in `boundary_check`"
        return sorted(boundary_check)
    return tuple()


def _load_block_pointer(ptr, mask, other, boundary_check, padding, cache, eviction, is_volatile, builder):
    # Load by a block pointer: `pointer_type<block_type<>>`
    # Block pointer can not have `mask` and `other` arguments
    if mask or other:
        raise ValueError("`mask` and `other` arguments cannot be specified for loading block pointers")

    elt_ty = ptr.type.element_ty.element_ty
    assert elt_ty != tl.int1, "`tl.int1` should be rewrited in `tl.make_block_ptr`"
    if elt_ty.is_int() and padding == ir.PADDING_OPTION.PAD_NAN:
        raise ValueError("Padding option `nan` is not supported for integer block pointers")

    # `dst_ty` is de-referenced type of the pointer type
    dst_ty = ptr.type.element_ty

    # Check `boundary_check` argument
    boundary_check = _canonicalize_boundary_check(boundary_check, dst_ty.get_block_shapes())

    # Build IR
    return tl.tensor(builder.create_tensor_pointer_load(ptr.handle, boundary_check, padding, cache, eviction,
                                                        is_volatile), dst_ty)


def _load_legacy(ptr, mask, other, boundary_check, padding, cache, eviction, is_volatile, builder):
    # Load by a tensor of pointers or a pointer of scalar: `block_type<pointer_type<>>` or `pointer_type<>`
    if not ptr.type.scalar.is_ptr():
        raise ValueError(f"Unsupported ptr type {ptr.type.__repr__()} in `tl.load`")

    # Check `mask`, `other`, `boundary_check`, and `padding` arguments
    if not mask and other:
        raise ValueError("`other` cannot be provided without `mask`")
    if padding or boundary_check:
        # BUG FIX: added the missing space after "tensor of" — the implicit
        # string concatenation previously produced "tensor ofpointers".
        raise ValueError("`padding_option` or `boundary_check` argument is not supported for loading a tensor of "
                         "pointers or loading a scalar. Because the compiler does not know the boundary; please "
                         "use block pointers (defined by `make_block_ptr`) instead")

    # For a pointer of scalar, check the type of `mask` and `other`
    if not ptr.type.is_block():
        if mask and mask.type.is_block():
            raise ValueError("Mask argument cannot be block type if pointer argument is not a block")
        if other and other.type.is_block():
            raise ValueError("Other argument cannot be block type if pointer argument is not a block")

    # Make `mask` and `other` into the same shape as `ptr`
    if ptr.type.is_block():
        if mask:
            mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder)
        if other:
            other = broadcast_impl_shape(other, ptr.type.get_block_shapes(), builder)

    # Get `pointer_type<elt_ty>` and `elt_ty`
    ptr_ty = ptr.type.scalar
    elt_ty = ptr_ty.element_ty

    # Treat `pointer_type<tl.int1>` as `pointer_type<tl.int8>`
    if elt_ty == tl.int1:
        elt_ty = tl.int8
        ptr_ty = tl.pointer_type(elt_ty, ptr_ty.address_space)
        ptr = cast(ptr, ptr_ty, builder)

    # Cast `other` into `ele_ty` type
    if other:
        other = cast(other, elt_ty, builder)

    # Create loaded result type `dst_ty`
    if ptr.type.is_block():
        shape = ptr.type.get_block_shapes()
        dst_ty = tl.block_type(elt_ty, shape)
    else:
        # Load by de-referencing the pointer of scalar
        dst_ty = elt_ty

    # Build IR
    if not mask:
        return tl.tensor(builder.create_load(ptr.handle, cache, eviction, is_volatile), dst_ty)
    else:
        return tl.tensor(builder.create_masked_load(ptr.handle, mask.handle,
                                                    other.handle if other else None,
                                                    cache, eviction, is_volatile), dst_ty)


def load(ptr: tl.tensor,
         mask: Optional[tl.tensor],
         other: Optional[tl.tensor],
         boundary_check,
         padding_option: str,
         cache_modifier: str,
         eviction_policy: str,
         is_volatile: bool,
         builder: ir.builder) -> tl.tensor:
    """Dispatch a load to the block-pointer or legacy (tensor-of-pointers) path."""
    # Cache, eviction and padding options
    cache = _str_to_cache_modifier(cache_modifier)
    eviction = _str_to_eviction_policy(eviction_policy)
    padding = _str_to_padding_option(padding_option)

    if ptr.type.is_ptr() and ptr.type.element_ty.is_block():
        # Load by a block pointer: `pointer_type<block_type<>>`
        return _load_block_pointer(ptr, mask, other, boundary_check, padding, cache, eviction, is_volatile, builder)
    else:
        # Load by a tensor of pointers or a pointer of scalar: `block_type<pointer_type<>>` or `pointer_type<>`
        return _load_legacy(ptr, mask, other, boundary_check, padding, cache, eviction, is_volatile, builder)


def _store_block_pointer(ptr, val, mask, boundary_check, cache, eviction, builder):
    # Store by a block pointer: `pointer_type<block_type<>>`
    # Block pointers can not have the `mask` argument
    if mask:
        # BUG FIX: the message previously said "loading" — copy-paste from the
        # load path.
        raise ValueError("`mask` and `other` arguments cannot be specified for storing block pointers")

    # Check same shape and element type
    block_shape = ptr.type.element_ty.get_block_shapes()
    if not val.type.is_block():
        val = broadcast_impl_shape(val, block_shape, builder)
    assert val.type.is_block(), "Value argument must be block type or a scalar"
    assert block_shape == val.type.get_block_shapes(), "Block shape and value shape mismatch"
    assert ptr.type.element_ty.element_ty == val.type.element_ty, "Block element type and value element type mismatch"

    elt_ty = ptr.type.element_ty.element_ty
    assert elt_ty != tl.int1, "`tl.int1` should be rewrited in `tl.make_block_ptr`"

    # Check `boundary_check` argument
    boundary_check = _canonicalize_boundary_check(boundary_check, block_shape)

    # Build IR
    return tl.tensor(builder.create_tensor_pointer_store(ptr.handle, val.handle, boundary_check, cache, eviction),
                     tl.void)


def _store_legacy(ptr, val, mask, boundary_check, cache, eviction, builder):
    # Store by a tensor of pointers or a pointer of scalar: `block_type<pointer_type<>>` or `pointer_type<>`
    if not ptr.type.scalar.is_ptr():
        raise ValueError(f"Unsupported ptr type {ptr.type.__repr__()} in `tl.store`")

    # Check `boundary_check` argument
    if boundary_check:
        raise ValueError("`boundary_check` argument is not supported for storing a tensor of pointers or storing a "
                         "scalar. Because the compiler does not know the boundary; please use block pointers "
                         "(defined by `make_block_ptr`) instead")

    # For a pointer of scalar, check the type of `val` and `mask`
    if not ptr.type.is_block():
        if val.type.is_block():
            raise ValueError("Value argument cannot be block type if pointer argument is not a block")
        if mask and mask.type.is_block():
            raise ValueError("Mask argument cannot be block type if pointer argument is not a block")

    # Make `mask` and `val` into the same shape as `ptr`
    if ptr.type.is_block():
        val = broadcast_impl_shape(val, ptr.type.get_block_shapes(), builder)
        if mask:
            mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder)

    ptr_ty = ptr.type.scalar
    elt_ty = ptr_ty.element_ty

    # Treat `pointer_type<tl.int1>` as `pointer_type<tl.int8>`
    if elt_ty == tl.int1:
        elt_ty = tl.int8
        ptr_ty = tl.pointer_type(elt_ty, ptr_ty.address_space)
        ptr = cast(ptr, ptr_ty, builder)

    # Cast to target data type
    val = cast(val, elt_ty, builder)

    # Build IR
    if not mask:
        return tl.tensor(builder.create_store(ptr.handle, val.handle, cache, eviction), tl.void)
    if not mask.type.scalar.is_bool():
        raise ValueError("Mask must have boolean scalar type")
    return tl.tensor(builder.create_masked_store(ptr.handle, val.handle, mask.handle, cache, eviction), tl.void)


def store(ptr: tl.tensor,
          val: tl.tensor,
          mask: Optional[tl.tensor],
          boundary_check,
          cache_modifier: str,
          eviction_policy: str,
          builder: ir.builder) -> tl.tensor:
    """Dispatch a store to the block-pointer or legacy (tensor-of-pointers) path."""
    # Cache and eviction options
    cache = _str_to_cache_modifier(cache_modifier)
    eviction = _str_to_eviction_policy(eviction_policy)

    if ptr.type.is_ptr() and ptr.type.element_ty.is_block():
        # Store by a block pointer: `pointer_type<block_type<>>`
        return _store_block_pointer(ptr, val, mask, boundary_check, cache, eviction, builder)
    else:
        # Store by a tensor of pointers or a pointer of scalar: `block_type<pointer_type<>>` or `pointer_type<>`
        return _store_legacy(ptr, val, mask, boundary_check, cache, eviction, builder)


#########
# atomic
#########
atomic_cas(ptr: tl.tensor, cmp: tl.tensor, val: tl.tensor, builder: ir.builder) -> tl.tensor: element_ty = ptr.type.scalar.element_ty if element_ty.primitive_bitwidth not in [16, 32, 64]: raise ValueError("atomic_cas only supports elements with width {16, 32, 64}") return tl.tensor(builder.create_atomic_cas(ptr.handle, cmp.handle, val.handle), val.type) def atom_red_typechecking_impl(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, op: str, builder: ir.builder) -> Tuple[tl.tensor, tl.tensor, tl.tensor]: if not ptr.type.scalar.is_ptr(): raise ValueError("Pointer argument of store instruction is " + ptr.type.__repr__()) element_ty = ptr.type.scalar.element_ty if element_ty is tl.float16 and op != 'add': raise ValueError("atomic_" + op + " does not support fp16") if element_ty in [tl.int1, tl.int8, tl.int16, tl.bfloat16]: raise ValueError("atomic_" + op + " does not support " + str(element_ty)) if ptr.type.is_block(): if mask: mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder) if val: val = broadcast_impl_shape(val, ptr.type.get_block_shapes(), builder) val = cast(val, ptr.type.scalar.element_ty, builder) if not mask: mask_ir = builder.get_int1(True) mask_ty = tl.int1 if ptr.type.is_block(): mask_ir = builder.create_splat(mask_ir, ptr.type.get_block_shapes()) mask_ty = tl.block_type(tl.int1, ptr.type.get_block_shapes()) mask = tl.tensor(mask_ir, mask_ty) return ptr, val, mask def atomic_max(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, builder: ir.builder) -> tl.tensor: ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'max', builder) sca_ty = val.type.scalar # direct call to atomic_max for integers if sca_ty.is_int(): if sca_ty.is_int_signed(): return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.MAX, ptr.handle, val.handle, mask.handle), val.type) else: return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.UMAX, ptr.handle, val.handle, mask.handle), val.type) # for float # return atomic_smax(i_ptr, i_val) if val >= 0 # return 
atomic_umin(i_ptr, i_val) if val < 0 i_val = bitcast(val, tl.int32, builder) i_ptr = bitcast(ptr, tl.pointer_type(tl.int32, 1), builder) pos = greater_equal(val, tl.tensor(builder.get_fp32(0), sca_ty), builder) neg = less_than(val, tl.tensor(builder.get_fp32(0), sca_ty), builder) pos_ret = tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.MAX, i_ptr.handle, i_val.handle, and_(mask, pos, builder).handle), i_val.type) neg_ret = tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.UMIN, i_ptr.handle, i_val.handle, and_(mask, neg, builder).handle), i_val.type) return where(pos, pos_ret, neg_ret, builder) def atomic_min(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, builder: ir.builder) -> tl.tensor: ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'min', builder) sca_ty = val.type.scalar # direct call to atomic_min for integers if sca_ty.is_int(): if sca_ty.is_int_signed(): return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.MIN, ptr.handle, val.handle, mask.handle), val.type) else: return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.UMIN, ptr.handle, val.handle, mask.handle), val.type) # for float # return atomic_smin(i_ptr, i_val) if val >= 0 # return atomic_umax(i_ptr, i_val) if val < 0 i_val = bitcast(val, tl.int32, builder) i_ptr = bitcast(ptr, tl.pointer_type(tl.int32, 1), builder) pos = greater_equal(val, tl.tensor(builder.get_fp32(0), sca_ty), builder) neg = less_than(val, tl.tensor(builder.get_fp32(0), sca_ty), builder) pos_ret = tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.MIN, i_ptr.handle, i_val.handle, and_(mask, pos, builder).handle), i_val.type) neg_ret = tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.UMAX, i_ptr.handle, i_val.handle, and_(mask, neg, builder).handle), i_val.type) return where(pos, pos_ret, neg_ret, builder) def atomic_add(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, builder: ir.builder) -> tl.tensor: ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'add', builder) sca_ty = val.type.scalar op = 
ir.ATOMIC_OP.FADD if sca_ty.is_floating() else ir.ATOMIC_OP.ADD return tl.tensor(builder.create_atomic_rmw(op, ptr.handle, val.handle, mask.handle), val.type) def atomic_and(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, builder: ir.builder) -> tl.tensor: ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'and', builder) return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.AND, ptr.handle, val.handle, mask.handle), val.type) def atomic_or(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, builder: ir.builder) -> tl.tensor: ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'or', builder) return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.OR, ptr.handle, val.handle, mask.handle), val.type) def atomic_xor(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, builder: ir.builder) -> tl.tensor: ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'xor', builder) return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.XOR, ptr.handle, val.handle, mask.handle), val.type) def atomic_xchg(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, builder: ir.builder) -> tl.tensor: ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'xchg', builder) return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.XCHG, ptr.handle, val.handle, mask.handle), val.type) # ===----------------------------------------------------------------------===// # Linear Algebra # ===----------------------------------------------------------------------===// def dot(lhs: tl.tensor, rhs: tl.tensor, allow_tf32: bool, out_dtype: tl.dtype, builder: ir.builder) -> tl.tensor: assert lhs.type.is_block() and rhs.type.is_block() assert lhs.dtype == rhs.dtype, "lhs and rhs must have the same dtype!" assert len(lhs.shape) == 2 and len(rhs.shape) == 2 assert lhs.shape[1].value == rhs.shape[0].value assert lhs.shape[0].value >= 16 and lhs.shape[1].value >= 16 \ and rhs.shape[1].value >= 16,\ "small blocks not supported!" 
if lhs.type.scalar.is_int(): assert lhs.type.scalar == tl.int8, "only int8 supported!" # TODO: This is CUDA specific, check if ROCm has the same limitation assert lhs.shape[1].value >= 32, "small blocks not supported!" _0 = builder.get_int32(0) ret_scalar_ty = tl.int32 elif lhs.type.scalar.is_fp32() or lhs.type.scalar.is_bf16(): _0 = builder.get_fp32(0) ret_scalar_ty = tl.float32 else: _0 = builder.get_fp16(0) if out_dtype.is_fp16() else builder.get_fp32(0) ret_scalar_ty = out_dtype M = lhs.type.shape[0] N = rhs.type.shape[1] _0 = builder.create_splat(_0, [M, N]) ret_ty = tl.block_type(ret_scalar_ty, [M, N]) return tl.tensor(builder.create_dot(lhs.handle, rhs.handle, _0, allow_tf32), ret_ty) # ===----------------------------------------------------------------------===// # Indexing # ===----------------------------------------------------------------------===// def where(condition: tl.tensor, x: tl.tensor, y: tl.tensor, builder: ir.builder) -> tl.tensor: condition = cast(condition, tl.int1, builder) if condition.type.is_block(): condition, x = broadcast_impl_value(condition, x, builder) x, y = broadcast_impl_value(x, y, builder) condition, x = broadcast_impl_value(condition, x, builder) x, y = binary_op_type_checking_impl(x, y, builder, True, True) if not condition.type.is_block(): condition, _ = broadcast_impl_value(condition, x, builder) ret_ty = x.type return tl.tensor(builder.create_select(condition.handle, x.handle, y.handle), ret_ty) # ===----------------------------------------------------------------------===// # Reduction # ===----------------------------------------------------------------------=== def reduction( inputs: Sequence[tl.tensor], axis: int, region_builder_fn, builder: ir.builder ) -> Tuple[tl.tensor, ...]: # get result shape shape = inputs[0].type.shape ret_shape = [s for i, s in enumerate(shape) if i != axis] for t in inputs: assert t.type.shape == shape def wrap_tensor(x, scalar_ty): if ret_shape: res_ty = tl.block_type(scalar_ty, ret_shape) 
else: # 0d-tensor -> scalar res_ty = scalar_ty return tl.tensor(x, res_ty) reduce_op = builder.create_reduce([t.handle for t in inputs], axis) region_builder_fn(reduce_op) reduce_op.verify() return tuple( wrap_tensor(reduce_op.get_result(i), inputs[i].type.scalar) for i in range(len(inputs)) ) # ===----------------------------------------------------------------------=== # Math # ===----------------------------------------------------------------------=== def _check_dtype(dtypes: List[str]) -> T: """ We following libdevice's convention to check accepted data types for math functions. It is not a good practice to support all data types as accelerators/GPUs don't support many float16 and bfloat16 math operations. We should let the users know that they are using and invoke explicit cast to convert the data type to the supported one. """ def wrapper(fn): @wraps(fn) def check(*args, **kwargs): # concatenate args and kwargs all_args = list(args) + list(kwargs.values()) for arg in [a for a in all_args if isinstance(a, tl.tensor)]: if arg.type.scalar.name not in dtypes: raise ValueError(f"Expected dtype {dtypes} but got {arg.type.scalar.name}") return fn(*args, **kwargs) return check return wrapper def umulhi(x: tl.tensor, y: tl.tensor, builder: ir.builder) -> tl.tensor: x, y = binary_op_type_checking_impl(x, y, builder) # FIXME(Keren): not portable, should be fixed from . import math return math.mulhi(x, y, _builder=builder) @_check_dtype(dtypes=["fp32", "fp64"]) def floor(x: tl.tensor, builder: ir.builder) -> tl.tensor: # FIXME(Keren): not portable, should be fixed from . 
import math return math.floor(x, _builder=builder) @_check_dtype(dtypes=["fp32", "fp64"]) def exp(x: tl.tensor, builder: ir.builder) -> tl.tensor: return tl.tensor(builder.create_exp(x.handle), x.type) @_check_dtype(dtypes=["fp32", "fp64"]) def log(x: tl.tensor, builder: ir.builder) -> tl.tensor: return tl.tensor(builder.create_log(x.handle), x.type) @_check_dtype(dtypes=["fp32", "fp64"]) def cos(x: tl.tensor, builder: ir.builder) -> tl.tensor: return tl.tensor(builder.create_cos(x.handle), x.type) @_check_dtype(dtypes=["fp32", "fp64"]) def sin(x: tl.tensor, builder: ir.builder) -> tl.tensor: return tl.tensor(builder.create_sin(x.handle), x.type) @_check_dtype(dtypes=["fp32", "fp64"]) def sqrt(x: tl.tensor, builder: ir.builder) -> tl.tensor: return tl.tensor(builder.create_sqrt(x.handle), x.type) def abs(x: tl.tensor, builder: ir.builder) -> tl.tensor: dtype = x.dtype if dtype.is_floating(): return tl.tensor(builder.create_fabs(x.handle), x.type) elif dtype.is_int_signed(): return tl.tensor(builder.create_iabs(x.handle), x.type) elif dtype.is_int_unsigned(): return x # no-op else: assert False, f"Unexpected dtype {dtype}" ## def multiple_of(x: tl.tensor, values: List[int]) -> tl.tensor: if len(x.shape) != len(values): raise ValueError("Shape of input to multiple_of does not match the length of values") x.handle.set_attr("tt.divisibility", ir.make_attr(values, x.handle.get_context())) return x def max_contiguous(x: tl.tensor, values: List[int]) -> tl.tensor: if len(x.shape) != len(values): raise ValueError("Shape of input to max_contiguous does not match the length of values") x.handle.set_attr("tt.contiguity", ir.make_attr(values, x.handle.get_context())) return x def debug_barrier(builder: ir.builder) -> tl.tensor: return tl.tensor(builder.create_barrier(), tl.void) def device_print(prefix: str, args: List[tl.tensor], builder: ir.builder) -> tl.tensor: new_args = [] for arg in args: new_args.append(arg.handle) return tl.tensor(builder.create_print(prefix, 
new_args), tl.void) def device_assert(cond: tl.tensor, msg: str, file_name: str, func_name, lineno: int, builder: ir.builder) -> tl.tensor: cond_ty = cond.type if not cond_ty.is_block(): cond_ty = tl.block_type(cond_ty.scalar, (1,)) cond = tl.tensor(builder.create_splat(cond.handle, (1,)), cond_ty) return tl.tensor(builder.create_assert(cond.handle, msg, file_name, func_name, lineno), tl.void) def _convert_elem_to_ir_value(builder, elem, require_i64): if isinstance(elem, tl.constexpr): return builder.get_int64(elem.value) if require_i64 else builder.get_int32(elem.value) elif isinstance(elem, tl.tensor): assert elem.numel.value == 1, "Expected a scalar in shape/strides/offsets" assert elem.dtype.is_int(), "Expected an integer scalar type in shape/strides/offsets" if elem.dtype != tl.int64 and require_i64: return builder.create_int_cast(elem.handle, builder.get_int64_ty(), elem.dtype.is_int_signed()) elif elem.dtype != tl.int32: return builder.create_int_cast(elem.handle, builder.get_int32_ty(), elem.dtype.is_int_signed()) return elem.handle assert False, f"Unsupported element type in shape/strides/offsets: {type(elem)}" def _convert_to_ir_values(builder, list_like, require_i64=True): if hasattr(list_like, "__iter__"): return [_convert_elem_to_ir_value(builder, elem, require_i64) for elem in list_like] return [_convert_elem_to_ir_value(builder, list_like, require_i64)] def make_block_ptr(base: tl.tensor, shape, strides, offsets, block_shape, order, builder: ir.builder) -> tl.tensor: # Convert dynamic arguments to IR values # NOTES(Chenggang): current `shape/strides` are `int64_t`, while `offsets/block_shape` are `int32_t` shape = _convert_to_ir_values(builder, shape) strides = _convert_to_ir_values(builder, strides) offsets = _convert_to_ir_values(builder, offsets, require_i64=False) # Check `base` type if not base.type.is_ptr() or base.type.element_ty.is_block(): raise ValueError("Expected `base` to be a pointer type (but not a block pointer type or others)") # 
Treat `pointer_type<tl.int1>` as `pointer_type<tl.int8>` if base.type.element_ty == tl.int1: base = cast(base, tl.pointer_type(tl.int8, base.type.address_space), builder) # Check whether `block_shape` is static if not hasattr(block_shape, "__iter__"): block_shape = [block_shape] block_shape = [elem.value if isinstance(elem, tl.constexpr) else elem for elem in block_shape] assert all([isinstance(elem, int) and -2**31 <= elem < 2**31 for elem in block_shape]), \ "Expected a list of constant integers (`int32_t` range) in `block_shape`" # Check `order` if not hasattr(order, "__iter__"): order = [order] order = [elem.value if isinstance(elem, tl.constexpr) else elem for elem in order] assert sorted(order) == list(range(len(order))), "Expected a permutation of (0, 1, ..., len(order)-1) in order" # Must have same length assert all([len(block_shape) == len(list_like) for list_like in [shape, strides, offsets, order]]), \ "Expected shape/strides/offsets/block_shape to have the same length" # Build value, the type is: # `pointer_type<blocked<shape, element_type>>` in Python # `tt.ptr<tensor<shape, element_type>>` in MLIR handle = builder.create_make_block_ptr(base.handle, shape, strides, offsets, block_shape, order) return tl.tensor(handle, tl.pointer_type(tl.block_type(base.type.element_ty, block_shape))) def advance(base: tl.tensor, offsets, builder: ir.builder) -> tl.tensor: # Convert dynamic offsets to IR values offsets = _convert_to_ir_values(builder, offsets, require_i64=False) # Advanced block pointer type is the same as before return tl.tensor(builder.create_advance(base.handle, offsets), base.type)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,420
quantapix/qnarre
refs/heads/main
/qnarre/prep/tokens/dpr.py
# Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import collections from ...tokens.utils import BatchEncoding from .bert import Bert VOCAB_FS = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} CONTEXT_ENCODER_PRETRAINED_VOCAB_MAP = { "vocab_file": { "facebook/dpr-ctx_encoder-single-nq-base": "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt", "facebook/dpr-ctx_encoder-multiset-base": "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt", }, "tokenizer_file": { "facebook/dpr-ctx_encoder-single-nq-base": "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json", "facebook/dpr-ctx_encoder-multiset-base": "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json", }, } QUESTION_ENCODER_PRETRAINED_VOCAB_MAP = { "vocab_file": { "facebook/dpr-question_encoder-single-nq-base": "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt", "facebook/dpr-question_encoder-multiset-base": "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt", }, "tokenizer_file": { "facebook/dpr-question_encoder-single-nq-base": "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json", 
"facebook/dpr-question_encoder-multiset-base": "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json", }, } READER_PRETRAINED_VOCAB_MAP = { "vocab_file": { "facebook/dpr-reader-single-nq-base": "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt", "facebook/dpr-reader-multiset-base": "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt", }, "tokenizer_file": { "facebook/dpr-reader-single-nq-base": "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json", "facebook/dpr-reader-multiset-base": "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json", }, } CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "facebook/dpr-ctx_encoder-single-nq-base": 512, "facebook/dpr-ctx_encoder-multiset-base": 512, } QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "facebook/dpr-question_encoder-single-nq-base": 512, "facebook/dpr-question_encoder-multiset-base": 512, } READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "facebook/dpr-reader-single-nq-base": 512, "facebook/dpr-reader-multiset-base": 512, } CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = { "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True}, } QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = { "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True}, } READER_PRETRAINED_INIT_CONFIGURATION = { "facebook/dpr-reader-single-nq-base": {"do_lower_case": True}, "facebook/dpr-reader-multiset-base": {"do_lower_case": True}, } class DPRContextEncoderTokenizer(Bert): vocab_fs = VOCAB_FS vocab_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_MAP input_caps = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class 
DPRQuestionEncoderTokenizer(Bert): vocab_fs = VOCAB_FS vocab_map = QUESTION_ENCODER_PRETRAINED_VOCAB_MAP input_caps = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION DPRSpanPrediction = collections.namedtuple( "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"], ) DPRReaderOutput = collections.namedtuple( "DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"] ) class Mixin: def __call__( self, questions, titles=None, texts=None, padding=False, truncation=False, max_length=None, return_tensors=None, return_attention_mask=None, **kw, ): if titles is None and texts is None: return super().__call__( questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kw, ) elif titles is None or texts is None: text_pair = titles if texts is None else texts return super().__call__( questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kw, ) titles = titles if not isinstance(titles, str) else [titles] texts = texts if not isinstance(texts, str) else [texts] n_passages = len(titles) questions = questions if not isinstance(questions, str) else [questions] * n_passages if len(titles) != len(texts): raise ValueError( f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts." 
) encoded_question_and_titles = super().__call__( questions, titles, padding=False, truncation=False )["input_ids"] encoded_texts = super().__call__( texts, add_special_tokens=False, padding=False, truncation=False )["input_ids"] encoded_inputs = { "input_ids": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip( encoded_question_and_titles, encoded_texts ) ] } if return_attention_mask is not False: attention_mask = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.PAD) for input_id in input_ids]) encoded_inputs["attention_mask"] = attention_mask return self.pad( encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors ) def decode_best_spans( self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans=16, max_answer_length=64, num_spans_per_passage=4, ): input_ids = reader_input["input_ids"] start_logits, end_logits, relevance_logits = reader_output[:3] n_passages = len(relevance_logits) sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__) nbest_spans_predictions = [] for doc_id in sorted_docs: sequence_ids = list(input_ids[doc_id]) # assuming question & title information is at the beginning of the sequence passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1 # second sep id if sequence_ids[-1] == self.PAD: sequence_len = sequence_ids.index(self.PAD) else: sequence_len = len(sequence_ids) best_spans = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( 
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ) ) if len(nbest_spans_predictions) >= num_spans: break return nbest_spans_predictions[:num_spans] def _get_best_spans( self, start_logits, end_logits, max_answer_length, top_spans, ): scores = [] for (start_index, start_score) in enumerate(start_logits): for (answer_length, end_score) in enumerate( end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score)) scores = sorted(scores, key=lambda x: x[1], reverse=True) chosen_span_intervals = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]") length = end_index - start_index + 1 if length > max_answer_length: raise ValueError(f"Span is too long: {length} > {max_answer_length}") if any( [ start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ] ): continue chosen_span_intervals.append((start_index, end_index)) if len(chosen_span_intervals) == top_spans: break return chosen_span_intervals class DPRReaderTokenizer(Mixin, Bert): vocab_fs = VOCAB_FS vocab_map = READER_PRETRAINED_VOCAB_MAP input_caps = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION model_input_names = ["input_ids", "attention_mask"]
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,421
quantapix/qnarre
refs/heads/main
/qnarre/base/doc/sanitizer.py
# Copyright 2019 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import gzip import codecs import pathlib as pth from qnarre.log import Logger log = Logger(__name__) c_map = { '\u2013': '-', '\xe9': 'e', '\u2019': "'", '\u201c': '"', '\u201d': '"', '\xbd': 'half', '\x96': '"', '\u2014': '-', '\u2018': "'", '\u2026': '...', '\xb8': ',', '\u2022': '-', '\xa7': 'para. ', '\xa9': '(c)', '\xae': '(R)', '\x92': "'", '\x93': '"', '\x94': '"', '\x99': '-', '\xad\xad': '', '\ufffd': 'ee', '\u2122': '(TM)', '””': '"', '�': '', '™”': '"', '\u200e': '', '—“': '- "', '▶': '', '”’': '"-s', '��': '', '…”': '... "', '😊': ':-)', '😎': ';-)', '›': '', '“…': '"...', '”…': '"...', 'ü': 'u', '😳': '', '😭': '', '😴': '', '😂': '', '😉': ';-)', 'ó': 'o', 'é’': "e'", '\u200b': '', '••••••••': '.....', '“…”': '"..."', '😢': ':-(', '———————————————————————————————————————————————————————': '', '·': '', '©': '(c)' } # '\xd4': "'", 'd5': "'", 'd2': '"', 'd3': '"', # 'de': 'fi', 'df': 'fl', 'a5': 'M', 'e1': '?', # 'a2': '?', 'db': '?' 
s_map = {'\r': '\n', '\t': ' ', ' ': ' ', ' \n': '\n'} def qnarre_handler(err): # k = err.object[err.start:err.end].hex() k = err.object[err.start:err.end] if k in c_map: # print('replacing {} with {}'.format(k, c_map[k])) return c_map[k], err.end # print(err.object[err.start - 20:err.end + 20]) raise err QNERR = 'qnerr' codecs.register_error(QNERR, qnarre_handler) def sanitize(txt): if isinstance(txt, str): txt = txt.replace('\xa0', ' ') try: return txt.encode('ascii', QNERR).decode('ascii', QNERR) except UnicodeError: # print(repr(txt)) raise elif isinstance(txt, pth.Path): p = txt s = p.suffix t = p.with_suffix('.qpx') def _sanitize(o): with o(t, 'w+t', encoding='ascii', errors=QNERR) as d: with o(p, 'rt') as s: for ln in s: ln = ln.encode('ascii', QNERR) d.write(ln.decode('ascii', QNERR)) if s == '.gz': _sanitize(gzip.open) else: _sanitize(open) t.rename(p) elif txt: print('sanitize called on', repr(txt)) return txt class Sanitizer: base = None @classmethod def create(cls, base=None): if base: cls.base = pth.Path(base) return cls() def load(self, path): def _text_at(): b = self.base p = b / path if b else pth.Path(path) try: s = p.read_text(errors='qnarre') except UnicodeDecodeError as e: log.error('Decode error {}', e) raise e for k, v in s_map.items(): s = s.replace(k, v) return p, s self._path, self._text = _text_at() def dump(self, path=None): p = path or self._path p.write_text(self._text) if __name__ == '__main__': import argparse as ap a = ap.ArgumentParser() a.add_argument('files', nargs='*', help='Files to read') a = a.parse_args() c = pth.Path.cwd() for f in a.files: sanitize(c / f)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,422
quantapix/qnarre
refs/heads/main
/qnarre/base/doc/junk.py
# Copyright 2019 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import re import pathlib as pth from .log import Logger from .sanitizer import QNERR from .base import config, Adrs from .nominals import flags, nbs log = Logger(__name__) def splicer(txt): p = '' for l in txt.splitlines(): if len(l) == 78 and l[-1] == '=': p += l[:-1] else: p += l yield ' '.join(p.split()) p = '' line = r'.+?\n' lines = r'(.+?\n)*?' 
line_2 = r'.+?==\n' eml_junk = ( r'[*]{4}', r' (GMT-05:00)', r'\[LINK:[^][]+\]', r'^X-Mailer: ' + line, r'^References: ' + line, r'^Return-Path: ' + line, r'^Mime-Version: ' + line, r'^Content-Type: ' + line, r'^From MAILER-DAEMON ' + line, r'^X-Gm-Message-State: ' + lines + line_2, r'^(X-Google-)?DKIM-Signature: ' + lines + line_2, r'^(X-)?Received: ' + lines + r'.+?\(P[D|S]T\)\n', ) eml_junk = tuple(re.compile(flags + p) for p in eml_junk) qt = r'(?P<qt>[> ]+)' dig = r'[\d -]' tel = r'[%.\d -]' nb2 = r'[^][\n]' ads = r'(?P<tx>([;, ]*' + Adrs.adr_pat + r'[;, ]*)+)' split_junk = ( (r'<tel:' + tel + r'+?>', '', r'(?P<lf><tel:' + tel + r'*?)\n' + qt + r'?(?P<rt>' + tel + r'+>)'), (r'<' + dig + r'+?>', '', r'(?P<lf><\d' + dig + r'*?)\n' + qt + r'?(?P<rt>' + dig + r'+>)'), (r'<mailto:' + nbs + r'+?>', '', r'(?P<lf><mailto:' + nbs + r'*?)\n' + qt + r'?(?P<rt>' + nbs + r'+>)'), (r'\[[ ]*mailto:(?P<tx>' + nb2 + r'+?)\]', r' \g<tx> ', r'(?P<lf>\[mailto:' + nb2 + r'*?)\n' + qt + r'?(?P<rt>' + nb2 + r'+\])'), (r'<' + ads + r'>', r' \g<tx> ', r'^(?P<lf>(?P<qt>>+ )?.*?<' + nbs + r'*)\n(?P=qt)(?P<rt>' + nbs + r'*>)'), (r'<(blocked::)?\W*(http:|https:|javascript:)' + nbs + r'+>', '', r'(?P<lf><http:' + nbs + r'*?)\n' + qt + r'?(?P<rt>' + nbs + r'+>)'), (r'<' + nbs + r'+?(.pdf|.jpg|.jpeg|.png|.gif|.tif|.doc|.mov|.docx|.ptx|.zip)>', '', ''), (r'\[ ?(cid|image|Description|http):' + nb2 + r'+?\]', '', r'(?P<lf>\[ ?(cid|image|Description|http):' + nb2 + r'*?)\n' + qt + r'?(?P<rt>' + nb2 + r'+\])'), ) split_junk = tuple((re.compile(flags + p), r, re.compile(flags + s)) for p, r, s in split_junk) ow = r'(On (?:(?!wrote:).)*\n(?:(?!On ).)*wrote:)$' ow = re.compile(flags + ow) def ow_splicer(txt): for e in ow.split(txt): if ow.match(e): yield e.replace('\n', ('' if e.endswith(' wrote:') else ' ')) else: yield e def defragment(txt): t = txt.strip() done = False while not done: done = True t = ''.join(ow_splicer(t)) for p, r, s in split_junk: while p.pattern != flags: t, n = p.subn(r, t) 
if n: done = False else: break if s.pattern != flags: t, n = s.subn(r'\g<lf>\g<rt>', t) if n: done = False return t def simple_replacer(txt): for ln in txt.strip().splitlines(): for p in config.line_junk: ln = ln.replace(p, '') for p, s in config.line_replace: ln = ln.replace(p, s) yield ln def patch(txt): t = txt for p, r in tuple((re.compile(flags + p), r) for p, r in config.fixups): t = p.sub(r, t) for r in tuple(re.compile(flags + p) for p in config.quotes): t = r.sub(r'\g<qt> | \g<tx>', t) return t nw = r'\W+' nwc = re.compile(flags + nw) def re_junks(name, rexes=None): rs = {} if rexes is None else rexes p = pth.Path.cwd() / name if p.exists(): t = '\n'.join(splicer(p.read_text('ascii', QNERR))).lower() for s in t.split('\n\n\n'): if s: ss = [s for s in nwc.split(s) if s] r = '\n' + ' '.join(ss) + '\n' if r not in rs: e = flags + r'^\W*' + '\W*'.join(ss) + '\W*$' rs[r] = re.compile(e) else: log.warning('Defaults for junk were not found') return rs class Junk: default = 'def_junks.txt' js = () rejs = re_junks(default) _sorted_rejs = None @classmethod def junks_from(cls, path): rs = re_junks(path, cls.rejs) t = '\n'.join(sorted(rs.keys())) pth.Path(cls.default).write_text(t, 'ascii', QNERR) def __init__(self, junks=None): if junks is not None: self.js = junks @property def sorted_rejs(self): if self._sorted_rejs is None: ks = sorted(self.rejs.keys(), key=lambda k: len(k), reverse=True) self._sorted_rejs = tuple(self.rejs[k] for k in ks) return self._sorted_rejs def add(self, junks): self.js = *self.js, *junks def dejunk_line(self, line): ln = ' '.join(line.split()) for j in self.js: ln = ln.replace(j, '') ln = ' '.join(ln.split()) return ln def dejunk_text(self, txt): t = '\n'.join(splicer(txt)) for p in eml_junk: t = p.sub('', t) t = defragment(t) t = '\n'.join(simple_replacer(t)) for j in self.sorted_rejs: t = j.sub('', t) t = patch(t) ls = t.strip().splitlines() return '\n'.join(' '.join(l.split()) for l in ls).strip() if __name__ == '__main__': j = Junk() 
j.junks_from('qnarre/junk.txt')
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,423
quantapix/qnarre
refs/heads/main
/qnarre/base/doc/reader.py
# Copyright 2019 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import re import os import pathlib as pth import collections as co from .date import Date from .log import Logger from .header import Line from .error import ExcludeException from .sanitizer import QNERR, sanitize from .base import config, LnkDate, LnkFrom log = Logger(__name__) def scanner(path, suffs, files=(), pfix=None, **_): n = 0 files = files or ('test', ) every = 'all' in files def scan_dir(path): with os.scandir(path) as es: for e in es: p = pth.Path(e.path) if not p.name.startswith('.'): ss = ''.join(p.suffixes) if p.is_file() and ss in suffs: if not pfix or p.stem.endswith(pfix): st = p.name.replace(ss, '') if every or st in files: nonlocal n n += 1 yield p elif p.is_dir(): yield from scan_dir(p) yield from scan_dir(path) if every: log.info('{} has {} files', path, n) def liner(path, clip=None, **kw): s = path.suffix if s == '.pdf': if clip is None: clip = 7 if b'PhoneView' not in path.read_bytes(): clip = 2 c = config.pdf_context() g = config.pdf_generator(c, **kw) e = config.pdf_executor(c, g) with open(path, "rb") as f: for p in config.pdf_page.get_pages(f): ls = e.process_page(p) for l in ([l for l in ls][:-clip] if clip else ls): yield sanitize(' '.join(l.split())) g.close() elif s == '.txt': # print(str(path)) # sanitize(path) with open(path, encoding='ascii', errors=QNERR) as f: 
for l in f: yield ' '.join(l.split()) elif s == '.md': # print(str(path)) # sanitize(path) with open(path, encoding='ascii', errors=QNERR) as f: for l in f: yield ' '.join(l.split()) def msger(path, src=None, msg_range=None, **kw): if src is None: from .mboxes import Mbox src = Mbox.qsrc n = 0 for m in src(path, **kw): if not msg_range or n in msg_range: yield m n += 1 if msg_range and n > max(msg_range): break if not msg_range: log.info('{} has {} messages', path.name, n) ws = re.compile(r'[a-zA-Z_]+,?', re.ASCII) def names(txt='', default='Cyndi'): t = ' '.join(ws.findall(txt)) t = t or default return ', '.join(t.split(',')) class Reader: def __init__(self, path): self.path = path def pdf_to_txt(self, **kw): for p in scanner(self.path, ('.pdf', ), **kw): p2 = p.with_suffix('.txt') if p2.exists(): log.warning('File {} exists, skipped', str(p2)) else: p2.write_text('\n'.join(liner(p, **kw)), 'ascii', QNERR) def from_tbox(self, *, ctxt, cntr, **kw): on = ' on ' sent = 'Sent ' recv = 'Received ' recv_from = 'Received from ' def src(path, **_): title = date = txt = None from_ = host = config.DEFAULT to = names() for ln in liner(path, **kw): if title and ln == title: continue elif ln.startswith('Messages with'): if date: t = '' if txt is None else txt yield date, from_, to, host, t title, date, txt = ln, None, None from_ = host = config.DEFAULT to = names(ln[len('Messages with'):]) continue elif ln.startswith('Messages between'): if date: t = '' if txt is None else txt yield date, from_, to, host, t title, date, txt = ln, None, None from_ = host = names(ln[len('Messages between'):]) to = ', '.join((from_, names())) continue elif ln.startswith('Messages'): if date: t = '' if txt is None else txt yield date, from_, to, host, t title, date, txt = ln, None, None from_ = host = config.DEFAULT to = names() continue if ln.startswith(sent) or ln.startswith('Send To '): if ln.startswith(sent): i = ln.find(on) i = (i + len(on)) if i > 0 else len(sent) ln = ln[i:] i = 
ln.rfind(':') i = ln.rfind('!') if i < 0 else i ln = ln if i < 0 else ln[:(i + 6)] else: i = ln.find(' at ') ln = ln[(i + len(' at ')):] try: d = Date.from_txt(ln) except ValueError as e: log.info('Failed to extract date {}', e) else: if date: t = '' if txt is None else txt yield date, from_, to, host, t date = d from_ = host txt = None continue elif ln.startswith(recv) or ln.startswith('From '): i = ln.find(on) if ln.startswith(recv_from): f = names(ln[len(recv_from):i], to) ln = ln[(i + len(on)):] elif ln.startswith('From '): i = ln.find(' at ') f = names(ln[len('From '):i], to) ln = ln[(i + len(' at ')):] else: i = (i + len(on)) if i > 0 else len(recv) ln = ln[i:] i = ln.rfind(':') i = ln.rfind('!') if i < 0 else i if i >= 0: i = i + 6 f = names(ln[i:], to) ln = ln[:i] try: d = Date.from_txt(ln) except ValueError as e: log.info('Failed to extract date {}', e) else: if date: t = '' if txt is None else txt yield date, from_, to, host, t date = d from_ = f txt = None continue if txt is None: txt = ln else: txt = '\n'.join((txt, ln)) if date: t = '' if txt is None else txt yield date, from_, to, host, t for p in scanner(self.path, ('.txt'), **kw): ctxt.current = n = str(p.relative_to(self.path)) cntr.retitle(n) for m in msger(p, src, **kw): yield n, m def from_sbox(self, *, ctxt, cntr, **kw): def src(path, date=None, topic=None, **kw): from_ = txt = None for l in liner(path, **kw): ps = l.split('::') if len(ps) == 2: if from_: date = date.next_sec() t = '' if txt is None else txt # print(date, from_, repr(t)) yield date, topic, from_, t from_, txt = ps else: if txt is None: txt = l else: txt = '\n'.join((txt, l)) if from_: date = date.next_sec() t = '' if txt is None else txt # print(date, from_, t) yield date, topic, from_, t p = self.path ctxt.current = t = p.name cntr.retitle(t) for d, p, _, i in Date.scanner(p, suffs=('.txt')): d = Date(d.raw).next_hour(i * 3) for m in msger(p, src, **kw, date=d, topic=t): yield str(p.relative_to(self.path)), m def 
from_mbox(self, *, ctxt, cntr, **kw): es = co.OrderedDict() us = co.OrderedDict() for p in scanner(self.path, ( '.mbox', '.mbox.xz', ), **kw): ctxt.current = n = p.stem cntr.retitle(p.name) for m in msger(p, **kw): if 'Drafts' in m.get('X-Gmail-Labels', ()): cntr.incr('-') continue mid = m['message-id'] try: if ctxt.mids[mid] is config.EXCLUDED: cntr.incr('-') continue except KeyError: us[p] = 1 + us.setdefault(p, 0) try: yield n, m except ExcludeException: es[p] = 1 + es.setdefault(p, 0) for p, u in us.items(): e = es.get(p, 0) log.info('{} has {} unique and {} excluded messages', p.name, u, e) def from_bbox(self, *, ctxt, cntr, **kw): date = LnkDate.label from_ = LnkFrom.label def src(path, **_): form = {} prev = None for ln in liner(path, **kw): ln = Line(ln) if ln.key is ln.ignore: continue if ln.key in form: yield form form = {} prev = None t = ln.txt if ln.key: if ln.key is ln.has_adrs: if prev and prev.key: form[prev.key] += ' ' + t else: if from_ in form: yield form form = {} prev = None form[from_] = 'From: ' + t continue elif ln.key is ln.has_date: if date in form: for n in config.book_names: if t.startswith(n): yield form form = {} f = 'On ' + t[len(n):] + ', ' f += n + ' wrote:' form[from_] = f prev = None break else: log.warning('Already dated {}, new one {}', form[date], ln.txt) continue form[date] = 'Date: ' + t else: form[ln.key] = t prev = ln continue else: form.setdefault('txt', []).append(t) prev = None yield form for p in scanner(self.path, ('.txt', ), **kw): ctxt.current = n = p.stem cntr.retitle(p.name) for m in msger(p, src, **kw): yield n, m def from_docs(self, *, ctxt, cntr, stamp=True, **kw): def src(path, date=None, topic=None, **_): yield date, topic, tuple(liner(path, **kw)) with os.scandir(self.path) as es: c = 0 for e in es: p = pth.Path(e.path) if p.is_dir(): ctxt.current = t = p.name cntr.retitle(t) for d, p, _, i in Date.scanner(p, suffs=('.md')): d = Date(d.raw) d.micro = c * 100 + i for m in msger(p, src, **kw, date=d, topic=t): 
yield str(p.relative_to(self.path)), m if stamp: c += 1 def from_main(self, **kw): yield from self.from_docs(**kw, stamp=False) if __name__ == '__main__': from .args import BArgs a = BArgs() a.add_argument('files', nargs='*', help='Files to read') a.add_argument('-c', '--clip', help='Lines to clip') a = a.parse_args() Reader(a.base).pdf_to_txt(**a.kw)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,424
quantapix/qnarre
refs/heads/main
/qnarre/base/doc/context.py
# Copyright 2019 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= from .log import Logger from .nominals import Nominals from .resource import Resource from .part import Contact, Place from .base import rst_def, rst_ref, config from .recs import Recs # needed dynamically from .part import Alias from .resource import Mids from .filters import Filters from .content import Texts, Htmls, Attms from .category import Subjects, Topics, Sources log = Logger(__name__) CTXT = config.CTXT class Context(Resource): _assets = ('filters', 'recs', 'mids', 'topics', 'subjects', 'sources', 'texts', 'htmls', 'attms') _res_path = config.qnar_dst + 'ctxt.qnr' _nominals = None _current = None _by_adr = None @classmethod def globals(cls): return globals() def __init__(self, elems=None, **kw): super().__init__(elems, **kw) if not elems and config.def_contacts[self.realm]: self.init = True self.slugs_for(config.def_contacts[self.realm]) self.slugs_for(config.def_contacts[None]) for n, t in (*config.contact_aliases[self.realm], *config.contact_aliases[None]): self.add_alias(Contact.slugify(n), Contact.slugify(t)) del self.init @property def assets(self): return (getattr(self, n) for n in self._assets) @property def loaded_assets(self): ns = self._assets return (a for a in (getattr(self, '_' + n, None) for n in ns) if a) @property def contacts(self): return (c for c in self.values() if 
isinstance(c, Contact)) @property def places(self): return (p for p in self.values() if isinstance(p, Place)) @property def nominals(self): if self._nominals is None: self._nominals = Nominals(''.join(e) for e in self.texts.elems) return self._nominals @property def current(self): return self._current @current.setter def current(self, current): if self._current: self.save() self._current = current @property def by_adr(self): if self._by_adr is None: self._by_adr = {} for c in self.contacts: c.map_by_adr(self) return self._by_adr def probe(self, adr): try: c = self.by_adr[adr] except KeyError: return None if hasattr(self, 'init') else self.filters.probe(adr) if config.EXCLUDED in self and c == self[config.EXCLUDED]: return False elif c != self[config.DEFAULT]: return True def slugs_for(self, spec, exclude=None, host=None): probe = None def by_names(names): ns = ','.join((names, host)) if host else names ss = [Contact.slugify(n) for n in ns.split(',') if n] try: ss = set(self[s].slug for s in ss) except KeyError: print(ss) raise if exclude: e = self[Contact.slugify(exclude)].slug if e in ss: ss.remove(e) for s in ss: yield self[s] def by_hdr(hdr): ss = [(a.addr_spec.lower(), a.display_name) for a in hdr.addresses] ps = [self.probe(a) for a, _ in ss if a] nonlocal probe if any(ps): probe = True elif any([True for p in ps if p is False]): probe = False for a, n in ss: try: c = self.by_adr[a] except KeyError: s = Contact.slugify(n) if n else config.TBD s = config.EXCLUDED if probe is False else s i = self if hasattr(self, 'init') else None try: c = self[s] except KeyError: c = Contact(n, slug=s, adr=a, ctxt=i) else: c.append(a, i) yield c if spec is not None: if hasattr(spec, 'addresses'): cs = by_hdr(spec) else: cs = by_names(spec) ss = tuple(sorted(set(c.slug for c in cs))) return probe, ss return probe, () def name(self, slug): try: n = self[slug].name except KeyError: n = slug # return '{} <{}@qnarre.com>'.format(n, slug) return str(n) def rename_msg(self, old, 
new): for a in self.loaded_assets: if hasattr(a, 'rename_msg'): a.rename_msg(old, new) def normalize_line(self, line): return line def extract(self, *args, text_only=False, **_): if not text_only: self.htmls.extract(*args) self.attms.extract(*args) return self.texts.extract(*args) def plainer(self, path, **kw): cs = sorted(self.contacts, key=lambda c: c.name) ps = sorted(self.places, key=lambda p: p.name) if path == CTXT: for e in (*cs, *ps): yield rst_def(CTXT, e.name) # yield from (' ' + l for l in e.plainer(**kw)) yield from e.plainer(**kw) else: pre = CTXT + '/' assert path.startswith(pre) path = path[len(pre):] if path == 'people': yield from ('#. ' + rst_ref(CTXT, c.name) for c in cs) elif path == 'places': yield from ('#. ' + rst_ref(CTXT, p.name) for p in ps) elif path in self: yield rst_ref(CTXT, path) else: raise KeyError('{} not in ctxt'.format(path)) def save(self, pref=None): pref = pref or self.current super().save(pref) for a in self.loaded_assets: a.save(pref) for a in Context._assets: setattr(Context, '_' + a, None) def make_getter(name): n = '_' + name c = globals()[name.capitalize()] def get(self): if getattr(self, n) is None: setattr(self, n, c.create(self.base, self.realm)) return getattr(self, n) return get setattr(Context, a, property(make_getter(a)))
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,425
quantapix/qnarre
refs/heads/main
/qnarre/prep/dataset/squad.py
# Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import json import datasets as ds from datasets.tasks import QuestionAnsweringExtractive _URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset/" _URLS = { "train": _URL + "train-v1.1.json", "valid": _URL + "dev-v1.1.json", } class Squad(ds.GeneratorBasedBuilder): BUILDER_CONFIGS = [ds.BuilderConfig(name="squad", version=ds.Version("1.0.0"))] def _info(self): return ds.DatasetInfo( description="", citation="", homepage="", license="", features=ds.Features( { "id": ds.Value("string"), "title": ds.Value("string"), "context": ds.Value("string"), "question": ds.Value("string"), "answers": ds.features.Sequence( {"text": ds.Value("string"), "answer_start": ds.Value("int32")} ), } ), task_templates=[ QuestionAnsweringExtractive( question_column="question", context_column="context", answers_column="answers" ) ], ) def _split_generators(self, mgr): fs = mgr.download_and_extract(_URLS) return [ ds.SplitGenerator(name=ds.Split.TRAIN, gen_kw={"filepath": fs["train"]}), ds.SplitGenerator(name=ds.Split.VALIDATION, gen_kw={"filepath": fs["valid"]}), ] def _generate_examples(self, path): i = 0 with open(path, encoding="utf-8") as f: for e in json.load(f)["data"]: t = e.get("title", "") for p in e["paragraphs"]: c = p["context"] for q in p["qas"]: ss = [a["answer_start"] for a in q["answers"]] xs = [a["text"] 
for a in q["answers"]] yield i, { "title": t, "context": c, "question": q["question"], "id": q["id"], "answers": {"answer_start": ss, "text": xs}, } i += 1
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,426
quantapix/qnarre
refs/heads/main
/qnarre/prep/tokens/rembert.py
# Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import os from shutil import copyfile import sentencepiece as spm from ...tokens.utils import PreTrainedTokenizer VOCAB_FS = {"vocab_file": "sentencepiece.model"} VOCAB_MAP = { "vocab_file": { "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model", }, } INPUT_CAPS = { "google/rembert": 256, } class Tokenizer(PreTrainedTokenizer): vocab_fs = VOCAB_FS vocab_map = VOCAB_MAP input_caps = INPUT_CAPS def __init__( self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, bos="[CLS]", eos="[SEP]", unk="[UNK]", sep="[SEP]", pad="[PAD]", cls="[CLS]", msk="[MASK]", **kw, ): super().__init__( do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos=bos, eos=eos, unk=unk, sep=sep, pad=pad, cls=cls, msk=msk, **kw, ) self.do_lower_case = do_lower_case self.remove_space = remove_space self.keep_accents = keep_accents self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(vocab_file) @property def s_vocab(self): return len(self.sp_model) def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.s_vocab)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, 
d): self.__dict__ = d self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file) def _tokenize(self, text, sample=False): pieces = self.sp_model.EncodeAsPieces(text) return pieces def _convert_token_to_id(self, token): return self.sp_model.PieceToId(token) def _convert_id_to_token(self, index): return self.sp_model.IdToPiece(index) def convert_tokens_to_string(self, tokens): out_string = self.sp_model.decode_pieces(tokens) return out_string def build_inputs_with_special_tokens(self, toks_0, toks_1=None): sep = [self.sep_token_id] cls = [self.cls_token_id] if toks_1 is None: return cls + toks_0 + sep return cls + toks_0 + sep + toks_1 + sep def get_special_tokens_mask( self, toks_0, toks_1=None, has_specials=False, ): if has_specials: if toks_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return list( map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, toks_0) ) if toks_1 is not None: return [1] + ([0] * len(toks_0)) + [1] + ([0] * len(toks_1)) + [1] return [1] + ([0] * len(toks_0)) + [1] def create_token_type_ids_from_sequences(self, toks_0, toks_1=None): sep = [self.sep_token_id] cls = [self.cls_token_id] if toks_1 is None: return len(cls + toks_0 + sep) * [0] return len(cls + toks_0 + sep) * [0] + len(toks_1 + sep) * [1] def save_vocabulary(self, dir, pre=None): path = os.path.join( dir, (pre + "-" if pre else "") + VOCAB_FS["vocab_file"], ) if os.path.abspath(self.vocab_file) != os.path.abspath(path): copyfile(self.vocab_file, path) return (path,)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,427
quantapix/qnarre
refs/heads/main
/qnarre/prep/metric/squad.py
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""SQuAD-style metric: exact match and token-level F1 for extractive QA."""

import datasets as ds
import re
import string

from collections import Counter


class Squad(ds.Metric):
    """Computes exact-match and F1 percentages for QA predictions.

    `predictions` carry an `id` and a `prediction_text`; `references` carry
    an `id` and a sequence of gold `answers` (text + answer_start).
    """

    def _info(self):
        return ds.MetricInfo(
            description="",
            citation="",
            inputs_description="",
            features=ds.Features(
                {
                    "predictions": {
                        "id": ds.Value("string"),
                        "prediction_text": ds.Value("string"),
                    },
                    "references": {
                        "id": ds.Value("string"),
                        "answers": ds.features.Sequence(
                            {"text": ds.Value("string"), "answer_start": ds.Value("int32")}
                        ),
                    },
                }
            ),
            codebase_urls=[],
            reference_urls=[],
        )

    def _compute(self, preds, refs):
        # Map prediction id -> text and reshape references into the nested
        # article/paragraph/qas layout that `_evaluate` expects.
        ps = {p["id"]: p["prediction_text"] for p in preds}
        qas = [
            {"answers": [{"text": t} for t in r["answers"]["text"]], "id": r["id"]}
            for r in refs
        ]
        # Renamed from `ds`: the old local shadowed the module-level
        # `datasets` alias of the same name.
        dset = [{"paragraphs": [{"qas": qas}]}]
        return _evaluate(dset, ps)


def _evaluate(dset, preds):
    """Score `preds` against `dset`; returns EM and F1 as percentages.

    param: dset - list of articles, each with paragraphs containing `qas`
    param: preds - dict mapping question id to predicted answer text
    """
    f1 = m = n = 0
    for e in dset:
        for p in e["paragraphs"]:
            for q in p["qas"]:
                n += 1
                i = q["id"]
                if i not in preds:
                    # Missing predictions still count toward the denominator.
                    print(f"Missing prediction for {i}")
                    continue
                x = preds[i]
                ts = list(map(lambda t: t["text"], q["answers"]))
                # Each question is scored against its best-matching gold answer.
                m += _max_over_ys(_match, x, ts)
                f1 += _max_over_ys(_f1, x, ts)
    return {"exact_match": 100.0 * m / n, "f1": 100.0 * f1 / n}


def _max_over_ys(f, x, ts):
    """Return the max of f(x, t) over gold answers `ts`, or 0 if none.

    `default=0` guards against a reference with an empty answer list, which
    previously raised `ValueError` from `max([])`.
    """
    return max((f(x, t) for t in ts), default=0)


def _match(x, t):
    """Exact match of prediction and gold answer after normalization."""
    return _normalize(x) == _normalize(t)


def _f1(x, t):
    """Token-level F1 between normalized prediction `x` and gold answer `t`."""
    xs = _normalize(x).split()
    ts = _normalize(t).split()
    common = Counter(xs) & Counter(ts)
    s = sum(common.values())
    if s == 0:
        # No token overlap; also covers either side normalizing to empty.
        return 0
    precision = 1.0 * s / len(xs)
    recall = 1.0 * s / len(ts)
    return (2 * precision * recall) / (precision + recall)


def _normalize(t):
    """Lowercase, drop punctuation and articles (a/an/the), collapse whitespace."""

    def no_punc(x):
        exclude = set(string.punctuation)
        return "".join(c for c in x if c not in exclude)

    def no_articles(x):
        return re.sub(r"\b(a|an|the)\b", " ", x)

    def ws_fix(x):
        return " ".join(x.split())

    return ws_fix(no_articles(no_punc(t.lower())))
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,428
quantapix/qnarre
refs/heads/main
/tools/triton/python/triton/language/extra/__init__.py
from . import cuda __all__ = ['cuda']
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,429
quantapix/qnarre
refs/heads/main
/qnarre/core/output.py
from dataclasses import fields, dataclass class Output(dict): def __init__(self, *xs, **kw): n = len(xs) if n > 0: x0 = xs[0] if isinstance(x0, dict): x0.update(kw) kw = x0 n = 0 else: try: for x in iter(x0): if ( not isinstance(x, (list, tuple)) or not len(x) == 2 or not isinstance(x[0], str) ): break if x[1] is not None: kw.update(tuple(x)) n = 0 except TypeError: pass for i, f in enumerate(fields(self)): v = xs[i] if i < n else None if v is None: v = kw.get(f.name, f.default) setattr(self, f.name, v) self[f.name] = v @dataclass(init=False) class Base(Output): y: tuple | None = None attns: tuple | None = None hiddens: tuple | None = None globals: tuple | None = None @dataclass class WithCaches(Output): y: tuple | None = None attns: tuple | None = None caches: tuple | None = None hiddens: tuple | None = None @dataclass class WithCrosses(Output): y: tuple | None = None attns: tuple | None = None crosses: tuple | None = None hiddens: tuple | None = None @dataclass class WithLoss(Output): logits: tuple | None = None attns: tuple | None = None hiddens: tuple | None = None globals: tuple | None = None loss: tuple | None = None @dataclass class WithMems(Output): y: tuple | None = None attns: tuple | None = None hiddens: tuple | None = None mems: tuple | None = None @dataclass class WithPools(Output): y: tuple | None = None attns: tuple | None = None hiddens: tuple | None = None globals: tuple | None = None pools: tuple | None = None @dataclass class CachesCrosses(Output): y: tuple | None = None attns: tuple | None = None caches: tuple | None = None crosses: tuple | None = None hiddens: tuple | None = None @dataclass class PoolsCrosses(Output): y: tuple | None = None attns: tuple | None = None caches: tuple | None = None crosses: tuple | None = None hiddens: tuple | None = None pools: tuple | None = None @dataclass class Seq2Seq(Output): y: tuple | None = None attns: tuple | None = None caches: tuple | None = None crosses: tuple | None = None hiddens: tuple | None = None 
enc_y: tuple | None = None enc_attns: tuple | None = None enc_hiddens: tuple | None = None enc_globals: tuple | None = None @dataclass class LossCaches(Output): logits: tuple | None = None attns: tuple | None = None caches: tuple | None = None hiddens: tuple | None = None loss: tuple | None = None @dataclass class LossCrosses(Output): logits: tuple | None = None attns: tuple | None = None caches: tuple | None = None crosses: tuple | None = None hiddens: tuple | None = None loss: tuple | None = None @dataclass class LossMems(Output): logits: tuple | None = None attns: tuple | None = None hiddens: tuple | None = None mems: tuple | None = None loss: tuple | None = None @dataclass class LossQA(Output): logits_beg: tuple | None = None logits_end: tuple | None = None attns: tuple | None = None hiddens: tuple | None = None globals: tuple | None = None loss: tuple | None = None @dataclass class LossQAPools(Output): logits_beg: tuple | None = None logits_end: tuple | None = None attns: tuple | None = None hiddens: tuple | None = None pools: tuple | None = None loss: tuple | None = None @dataclass class LossSeq(Output): logits: tuple | None = None next: tuple | None = None attns: tuple | None = None hiddens: tuple | None = None loss: tuple | None = None @dataclass class LossSeq2Seq(Output): logits: tuple | None = None attns: tuple | None = None caches: tuple | None = None crosses: tuple | None = None hiddens: tuple | None = None enc_y: tuple | None = None enc_attns: tuple | None = None enc_hiddens: tuple | None = None enc_globals: tuple | None = None loss: tuple | None = None @dataclass class LossSeq2SeqQA(Output): logits_beg: tuple | None = None logits_end: tuple | None = None caches: tuple | None = None crosses: tuple | None = None attns: tuple | None = None hiddens: tuple | None = None enc_y: tuple | None = None enc_attns: tuple | None = None enc_hiddens: tuple | None = None enc_globals: tuple | None = None loss: tuple | None = None
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,430
quantapix/qnarre
refs/heads/main
/qnarre/try/fused_softmax.py
import torch
from apex._autocast_utils import _cast_if_autocast_enabled
from apex.transformer.enums import AttnMaskType
from fused_softmax_lib import scaled_masked_softmax_forward, scaled_masked_softmax_backward
from fused_softmax_lib import scaled_masked_softmax_get_batch_per_block
from fused_softmax_lib import scaled_upper_triang_masked_softmax_forward, scaled_upper_triang_masked_softmax_backward


class ScaledUpperTriangMaskedSoftmax(torch.autograd.Function):
    """Autograd wrapper for the fused scale + causal (upper-triangular)
    mask + softmax kernel.

    Takes a 3D input (attn_batches, sq, sk); the causal mask is applied
    inside the kernel, so no mask tensor is passed.
    """

    @staticmethod
    def forward(ctx, inputs, scale):
        # The kernel takes the scale as a tensor element, not a Python float.
        scale_t = torch.tensor([scale])
        softmax_results = scaled_upper_triang_masked_softmax_forward(
            inputs, scale_t[0]
        )
        ctx.save_for_backward(softmax_results, scale_t)
        return softmax_results

    @staticmethod
    def backward(ctx, output_grads):
        softmax_results, scale_t = ctx.saved_tensors
        input_grads = scaled_upper_triang_masked_softmax_backward(
            output_grads, softmax_results, scale_t[0]
        )
        # One gradient per forward input (inputs, scale); scale gets None.
        return input_grads, None


def scaled_upper_triang_masked_softmax(inputs, _, scale):
    """Apply the fused causal softmax to a 4D (b, np, sq, sk) input.

    The second parameter is an ignored mask placeholder kept only so this
    function is call-compatible with ``scaled_masked_softmax``.
    """
    b, np, sq, sk = inputs.size()
    assert sq == sk, "causal mask is only for self attention"
    # Reshaping input to 3D tensor (attn_batches, sq, sk)
    inputs = inputs.view(-1, sq, sk)
    args = _cast_if_autocast_enabled(inputs, scale)
    # Autocast is disabled because the arguments were already cast above.
    with torch.cuda.amp.autocast(enabled=False):
        probs = ScaledUpperTriangMaskedSoftmax.apply(*args)
    return probs.view(b, np, sq, sk)


class ScaledMaskedSoftmax(torch.autograd.Function):
    """Autograd wrapper for the fused scale + explicit mask + softmax
    kernel."""

    @staticmethod
    def forward(ctx, inputs, mask, scale):
        # The kernel takes the scale as a tensor element, not a Python float.
        scale_t = torch.tensor([scale])
        softmax_results = scaled_masked_softmax_forward(inputs, mask, scale_t[0])
        ctx.save_for_backward(softmax_results, scale_t)
        return softmax_results

    @staticmethod
    def backward(ctx, output_grads):
        softmax_results, scale_t = ctx.saved_tensors
        input_grads = scaled_masked_softmax_backward(
            output_grads, softmax_results, scale_t[0]
        )
        # One gradient per forward input (inputs, mask, scale).
        return input_grads, None, None


def scaled_masked_softmax(inputs, mask, scale):
    """Apply the fused masked softmax to a 4D input with an explicit mask."""
    # input is 4D tensor (b, np, sq, sk)
    args = _cast_if_autocast_enabled(inputs, mask, scale)
    with torch.cuda.amp.autocast(enabled=False):
        return ScaledMaskedSoftmax.apply(*args)


class FusedScaleMaskSoftmax(torch.nn.Module):
    """Scale + mask + softmax, dispatching to the fused kernels when the
    input qualifies and falling back to a plain torch softmax otherwise.

    Args:
        input_in_fp16: input is fp16 (mutually exclusive with bf16).
        input_in_bf16: input is bf16 (mutually exclusive with fp16).
        attn_mask_type: AttnMaskType.causal or AttnMaskType.padding.
        scaled_masked_softmax_fusion: enable the fused-kernel path.
        mask_func: callable(input, mask) used in the fallback path.
        softmax_in_fp32: compute the fallback softmax in fp32.
        scale: optional pre-softmax scaling factor; requires
            softmax_in_fp32 when set.
    """

    def __init__(
        self,
        input_in_fp16,
        input_in_bf16,
        attn_mask_type,
        scaled_masked_softmax_fusion,
        mask_func,
        softmax_in_fp32,
        scale,
    ):
        super().__init__()
        self.input_in_fp16 = input_in_fp16
        self.input_in_bf16 = input_in_bf16
        if self.input_in_fp16 and self.input_in_bf16:
            raise RuntimeError(
                "both fp16 and bf16 flags cannot be active at the same time."
            )
        self.input_in_float16 = self.input_in_fp16 or self.input_in_bf16
        self.attn_mask_type = attn_mask_type
        self.scaled_masked_softmax_fusion = scaled_masked_softmax_fusion
        self.mask_func = mask_func
        self.softmax_in_fp32 = softmax_in_fp32
        self.scale = scale
        if not (self.scale is None or softmax_in_fp32):
            raise RuntimeError("softmax should be in fp32 when scaled")
        if self.scaled_masked_softmax_fusion:
            # Bind the fused dispatcher up front based on the mask type.
            if self.attn_mask_type == AttnMaskType.causal:
                self.fused_softmax_func = scaled_upper_triang_masked_softmax
            elif self.attn_mask_type == AttnMaskType.padding:
                self.fused_softmax_func = scaled_masked_softmax
            else:
                raise ValueError("Invalid attn_mask_type.")

    def forward(self, input, mask):
        # [b, np, sq, sk]
        assert input.dim() == 4
        if self.is_kernel_available(mask, *input.size()):
            return self.forward_fused_softmax(input, mask)
        else:
            return self.forward_torch_softmax(input, mask)

    def is_kernel_available(self, mask, b, np, sq, sk):
        """Return True when the fused kernel can handle this input shape.

        The size limits and divisibility requirements below mirror the
        constraints of the fused kernels — TODO confirm against the
        fused_softmax_lib build in use.
        """
        attn_batches = b * np
        if (
            self.scaled_masked_softmax_fusion  # user wants to fuse
            and self.input_in_float16  # input must be fp16 or bf16
            and (
                self.attn_mask_type == AttnMaskType.causal
                or (self.attn_mask_type == AttnMaskType.padding and mask is not None)
            )
            and 16 < sk <= 8192  # sk must be within 16 ~ 8192
            and sq % 4 == 0  # sq must be a multiple of 4
            and sk % 4 == 0  # sk must be a multiple of 4
            and attn_batches % 4 == 0  # np * b must be a multiple of 4
        ):
            if 0 <= sk <= 8192:
                batch_per_block = self.get_batch_per_block(sq, sk, b, np)
                if self.attn_mask_type == AttnMaskType.causal:
                    if attn_batches % batch_per_block == 0:
                        return True
                else:
                    if sq % batch_per_block == 0:
                        return True
        return False

    def forward_fused_softmax(self, input, mask):
        # input.shape = [b, np, sq, sk]
        scale = self.scale if self.scale is not None else 1.0
        return self.fused_softmax_func(input, mask, scale)

    def forward_torch_softmax(self, input, mask):
        """Fallback path: scale, mask and softmax with plain torch ops."""
        if self.input_in_float16 and self.softmax_in_fp32:
            # Promote to fp32 for the softmax, cast back below.
            input = input.float()
        if self.scale is not None:
            input = input * self.scale
        mask_output = self.mask_func(input, mask) if mask is not None else input
        probs = torch.nn.Softmax(dim=-1)(mask_output)
        if self.input_in_float16 and self.softmax_in_fp32:
            if self.input_in_fp16:
                probs = probs.half()
            else:
                probs = probs.bfloat16()
        return probs

    @staticmethod
    def get_batch_per_block(sq, sk, b, np):
        # Delegates to the fused library's occupancy query.
        return scaled_masked_softmax_get_batch_per_block(sq, sk, b, np)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,431
quantapix/qnarre
refs/heads/main
/qnarre/prep/convert/gpt.py
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

import json
import os
import torch
import re

import numpy as np

from argparse import ArgumentParser

from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging

from ..config.openai import PreTrained
from ...run.openai import Model

logging.set_verbosity_info()

log = logging.get_logger(__name__)


def load_src_weights(model, src_path):
    """Load an original OpenAI-GPT numpy/TF checkpoint into `model` in place.

    The checkpoint directory is expected to contain `parameters_names.json`,
    `params_shapes.json` and ten shards `params_0.npy` .. `params_9.npy`.
    Returns the same `model` with its weights overwritten.
    """
    if ".ckpt" in src_path:
        # A .ckpt path points at a file inside the checkpoint directory.
        src_path = os.path.dirname(src_path)
    log.info(f"Loading from: {src_path}")
    with open(src_path + "/parameters_names.json", "r", encoding="utf-8") as f:
        ns = json.load(f)
    ws = [np.load(src_path + f"/params_{i}.npy") for i in range(10)]
    with open(src_path + "/params_shapes.json", "r", encoding="utf-8") as f:
        ss = json.load(f)
    # The shards are one flat stream; split it back into per-parameter
    # arrays at the cumulative-size offsets, then restore each shape.
    offsets = np.cumsum([np.prod(s) for s in ss])
    ws = np.split(np.concatenate(ws, 0), offsets)[:-1]
    ws = [w.reshape(s) for w, s in zip(ws, ss)]
    ws = [w.squeeze() for w in ws]
    # BUGFIX: these sanity checks previously asserted `!=`, i.e. they failed
    # exactly when the checkpoint matched the model. They must assert equality.
    assert model.tokens_embed.weight.shape == ws[1].shape
    assert model.positions_embed.weight.shape == ws[0].shape
    model.tokens_embed.weight.data = torch.from_numpy(ws[1])
    model.positions_embed.weight.data = torch.from_numpy(ws[0])
    # Drop the embedding entries handled above so names/arrays stay aligned.
    ns.pop(0)
    ws.pop(0)
    ws.pop(0)
    # BUGFIX: the original looped `for n in ns` and then did `ws[n]`, indexing
    # the array list with a *name string* (TypeError). Walk both in lockstep.
    for n, w in zip(ns, ws):
        ss = n[6:]  # skip "model/"
        assert ss[-2:] == ":0"
        ss = ss[:-2].split("/")
        p = model
        for s in ss:
            # Scope segments like "h3" split into ("h", "3") to index layers.
            if re.fullmatch(r"[A-Za-z]+\d+", s):
                scopes = re.split(r"(\d+)", s)
            else:
                scopes = [s]
            if scopes[0] == "g":
                p = getattr(p, "weight")
            elif scopes[0] == "b":
                p = getattr(p, "bias")
            elif scopes[0] == "w":
                p = getattr(p, "weight")
            else:
                p = getattr(p, scopes[0])
            if len(scopes) >= 2:
                p = p[int(scopes[1])]
        # BUGFIX: was `assert p.shape != w.shape`; must require matching shapes.
        assert p.shape == w.shape
        p.data = torch.from_numpy(w)
    return model


def to_pytorch(src_path, cfg_path, save_path):
    """Convert the checkpoint at `src_path` and save PyTorch weights + config.

    `cfg_path` may be "" to use the default `PreTrained` configuration.
    Writes WEIGHTS_NAME and CONFIG_NAME files under `save_path`.
    """
    cfg = PreTrained() if cfg_path == "" else PreTrained.from_json_file(cfg_path)
    print(f"Building from config: {cfg}")
    m = Model(cfg)
    load_src_weights(m, src_path)
    w = save_path + "/" + WEIGHTS_NAME
    print(f"Saving to: {w}")
    torch.save(m.state_dict(), w)
    c = save_path + "/" + CONFIG_NAME
    print(f"Save config to: {c}")
    with open(c, "w", encoding="utf-8") as f:
        f.write(cfg.to_json_string())


if __name__ == "__main__":
    x = ArgumentParser()
    x.add_argument("--src_path", default=None, type=str, required=True)
    x.add_argument("--cfg_path", default="", type=str)
    x.add_argument("--save_path", default=None, type=str, required=True)
    y = x.parse_args()
    to_pytorch(y.src_path, y.cfg_path, y.save_path)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,432
quantapix/qnarre
refs/heads/main
/qnarre/models/llama.py
# Copyright 2023 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import torch import torch.utils.checkpoint from torch import nn from torch.nn import functional as F from transformers.utils import logging from .. import core as qc from ..core import utils as qu from ..core import output as qo from ..core import forward as qf from ..core import attention as qa from ..core import mlp as qm from ..core import embed as qe from ..core import norm as qn from ..prep.config.llama import PreTrained import math from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss log = logging.get_logger(__name__) class PreTrained(PreTrainedModel): config_class = LlamaConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["LlamaDecoderLayer"] _keys_to_ignore_on_load_unexpected = [r"decoder\.version"] def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, Model): module.gradient_checkpointing = value class ForCausal(PreTrained): 
def __init__(self, config): super().__init__(config) self.model = Model(config) self.lm_head = nn.Linear(cfg.d_model, config.vocab_size, bias=False) self.post_init() def forward( self, input_ids=None, mask=None, position_ids=None, past_key_values=None, inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model( input_ids=input_ids, mask=mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class ForSeqClass(PreTrained): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.model = Model(config) self.score = nn.Linear(cfg.d_model, self.num_labels, bias=False) self.post_init() def forward( self, input_ids=None, mask=None, position_ids=None, past_key_values=None, inputs_embeds=None, 
labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.model( input_ids, mask=mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to( logits.device ) else: sequence_lengths = -1 pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and ( labels.dtype == torch.long or labels.dtype == torch.int ): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = 
(pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) class Model(PreTrained): def __init__(self, config): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, cfg.d_model, self.padding_idx) self.layers = nn.ModuleList([Layer(config) for _ in range(config.num_hidden_layers)]) self.norm = qn.RMS(cfg.d_model, eps=config.rms_norm_eps) self.gradient_checkpointing = False # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_mask def _prepare_decoder_mask(self, mask, input_shape, inputs_embeds, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_mask = None if input_shape[-1] > 1: combined_mask = qu.causal_mask( input_shape, inputs_embeds.dtype, device=inputs_embeds.device, c_len=past_key_values_length, ) if mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = qu.expand_mask(mask, inputs_embeds.dtype, len=input_shape[-1]).to( inputs_embeds.device ) combined_mask = ( expanded_attn_mask if combined_mask is None else expanded_attn_mask + combined_mask ) return combined_mask def forward( self, input_ids=None, mask=None, position_ids=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = 
return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError( "You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time" ) elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError( "You have to specify either decoder_input_ids or decoder_inputs_embeds" ) seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device, ) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) else: position_ids = position_ids.view(-1, seq_length).long() if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if mask is None: mask = torch.ones( (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device ) mask = self._prepare_decoder_mask( mask, (batch_size, seq_length), inputs_embeds, past_key_values_length ) hidden_states = inputs_embeds if self.gradient_checkpointing and self.training: if use_cache: log.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = () if use_cache else None for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs, output_attentions, None) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, mask, position_ids, None, ) else: layer_outputs = decoder_layer( hidden_states, mask=mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) class Layer(qc.Module): hs = qc.Hypers({"d_model", "add_cross", "n_inner"}) def __init__(self, lay_i, ps={}, hs=[], **kw): super().__init__(ps, [self.hs] + hs, **kw) cfg = self.get_cfg(kw) d = cfg.d_model self.attn = Attention(**kw) self.proj = qm.Llama(d, **kw) self.norm_attn = qn.RMS(d, **kw) self.norm = qn.RMS(d, **kw) def forward(self, x, mask=None, pos=None, cache=None, **kw): y = self.norm_attn(x) y, a, kv = self.attn(y, mask=mask, pos=pos, cache=cache, **kw) y = x + y x = y 
return x + self.proj(self.norm(y)), a, kv class Attention(qc.Module): hs = qc.Hypers({"d_model", "n_heads", "n_pos"}) def __init__(self, ps={}, hs=[], **kw): super().__init__(ps, [self.hs] + hs, **kw) cfg = self.get_cfg(kw) d, h = cfg.d_model, cfg.n_heads assert d % h == 0 cfg.s_head = s = int(d / h) self.emb = qe.RotaryEmbed(s, **kw) self.query = qc.Linear(d, h * s, bias=False, **kw) self.key = qc.Linear(d, h * s, bias=False, **kw) self.value = qc.Linear(d, h * s, bias=False, **kw) self.proj = qc.Linear(h * s, d, bias=False, **kw) def _shape(self, tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, h, s).transpose(1, 2).contiguous() def forward(self, x, mask=None, pos=None, cache=None, **kw): cfg = self.cfg b, n_q, _ = x.size() d, h, s = cfg.d_model, cfg.n_heads, cfg.s_head q = self.query(x).view(b, n_q, h, s).transpose(1, 2) k = self.key(x).view(b, n_q, h, s).transpose(1, 2) v = self.value(x).view(b, n_q, h, s).transpose(1, 2) n_kv = k.shape[-2] if cache is not None: n_kv += cache[0].shape[-2] cos, sin = self.emb(v, seq_len=n_kv) q, k = qe.apply_rotary_pos_emb(q, k, cos, sin, pos) if cache is not None: k = torch.cat([cache[0], k], dim=2) v = torch.cat([cache[1], v], dim=2) a = torch.matmul(q, k.transpose(2, 3)) / math.sqrt(s) assert a.size() == (b, h, n_q, n_kv) if mask is not None: assert mask.size() == (b, 1, n_q, n_kv) a = a + mask a = torch.max(a, torch.tensor(torch.finfo(a.dtype).min)) a = F.softmax(a, dim=-1, dtype=torch.float32).to(q.dtype) y = torch.matmul(a, v) assert y.size() == (b, h, n_q, s) y = y.transpose(1, 2) y = y.reshape(b, n_q, d) return self.proj(y), a, (k, v)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,433
quantapix/qnarre
refs/heads/main
/tools/triton/python/triton/debugger/debugger.py
import itertools import random from typing import Tuple import triton import triton.language as tl from .core import ExecutionContext from .memory_map import MemoryMap from .tl_lang import (TritonLangProxy, WrappedTensor, _primitive_to_tensor, debugger_constexpr) from triton.debugger import torch_wrapper torch = torch_wrapper.torch tl_method_backup = {} def get_proxy_method(proxy, name): method = getattr(proxy, name) def fun(*args, **kwarg): return method(*args, **kwarg) return fun def attach_triton(module, proxy): method_list = [func for func in dir(TritonLangProxy) if func[0] != "_"] for name in method_list: if hasattr(module, name): attr = getattr(module, name) tl_method_backup[name] = attr if callable(attr): setattr(module, name, get_proxy_method(proxy, name)) else: setattr(module, name, getattr(proxy, name)) def detach_triton(module): for name, method in tl_method_backup.items(): setattr(module, name, method) def program_ids_from_grid(grid: Tuple[int, ...]) -> Tuple[int, ...]: # reverse the grid dimensions and generate the range for each dimension reversed_grid = reversed(grid) ranges_for_each_dimension = [range(dim) for dim in reversed_grid] # gen all combinations index_combinations = list(itertools.product(*ranges_for_each_dimension)) random.shuffle(index_combinations) for index_combination in index_combinations: yield index_combination class DebuggerFunction: def __init__(self, func, grid=(1,)): self.func = func self.grid = grid def _is_constexpr(self, name): return name in self.func.__annotations__ and self.func.__annotations__[name] is triton.language.core.constexpr def _get_constexpr(self): result = [] for name, annotation in self.func.__annotations__.items(): if annotation is triton.language.core.constexpr: result.append(name) return result def _assert_constexpr(self, **kwargs): constexp = self._get_constexpr() missing = [i for i in constexp if i not in kwargs.keys()] assert len(missing) == 0, f"You must specify constexpr {missing}" def _get_grid(self, 
**kwargs): if callable(self.grid): return self.grid(kwargs) else: return self.grid def __call__(self, *args, **kwargs): self._assert_constexpr(**kwargs) memory = MemoryMap() def convert_arg(v): name, arg = v if torch.is_tensor(arg): ptr = memory.add_tensor(arg) return WrappedTensor(torch.tensor([ptr], dtype=torch.int64, device="cuda")) if self._is_constexpr(name): return debugger_constexpr(arg) return WrappedTensor(_primitive_to_tensor(arg)) new_args = tuple(map(convert_arg, zip(self.func.__code__.co_varnames, args))) new_kwargs = {k: convert_arg((k, v)) for (k, v) in kwargs.items() if k not in ["num_warps", "num_stages"]} grid = self._get_grid(**kwargs) for program_id in program_ids_from_grid(grid): proxy = TritonLangProxy(memory, ExecutionContext(program_id, grid)) attach_triton(tl, proxy) self.func(*new_args, **new_kwargs) detach_triton(tl) class GridSelector: """ Entry point of the debugger """ def __init__(self, func): version = torch.__version__ assert version[0] == "2", f"Triton Debugger only supports torch >= 2.0, using {version}" self.func = func def __getitem__(self, grid): return DebuggerFunction(self.func, grid) def __call__(self, *args, **kwargs): return DebuggerFunction(self.func)(*args, **kwargs) class AutotuneGridSelector: def __init__(self, func, autotune_params): self.func = func self.autotune_params = autotune_params def __getitem__(self, grid): return AutotuneRunner(self.func, self.autotune_params, grid) def __call__(self, *args, **kwargs): return AutotuneRunner(self.func, self.autotune_params)(*args, **kwargs) class AutotuneRunner: def __init__(self, func, autotune_params, grid=None): self.func = func self.autotune_params = autotune_params self.grid = grid def __call__(self, *args, **kwargs): assert len(self.autotune_params["configs"]) >= 1 for config in self.autotune_params["configs"][1:]: def convert_arg(v): if torch.is_tensor(v): return torch.clone(v) return v new_args = tuple(map(convert_arg, args)) new_kwargs = {k: convert_arg(v) for k, v 
in kwargs.items()} if self.grid: self.func[self.grid](*new_args, **new_kwargs, **config.kwargs) else: self.func(*new_args, **new_kwargs, **config.kwargs) main_config = self.autotune_params["configs"][0] if self.grid: self.func[self.grid](*args, **kwargs, **main_config.kwargs) else: self.func(*args, **kwargs, **main_config.kwargs) def triton_debug_autotune(**kwars): def wrapper(func): return AutotuneGridSelector(func, kwars) return wrapper
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,434
quantapix/qnarre
refs/heads/main
/qnarre/base/doc/log.py
# Copyright 2019 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import logging as lg import contextlib as cl lg.basicConfig( level=lg.DEBUG, format="%(asctime)s %(name)-12s %(levelname)-8s %(message)s", datefmt="%m-%d %H:%M", # filename='/tmp/qnarre.log', filename="/tmp/qnarre.log", # filemode='w' filemode="w", ) # 'a' ch = lg.StreamHandler() ch.setLevel(lg.WARNING) ch.setFormatter(lg.Formatter("%(name)-12s: %(levelname)-8s %(message)s")) lg.getLogger().addHandler(ch) class Logger(lg.LoggerAdapter): def __init__(self, name, extra=None): super().__init__(lg.getLogger(name), extra or {}) def log(self, level, msg, *args, **kw): if self.isEnabledFor(level): msg, kw = self.process(msg, kw) class Msg: def __init__(self, fmt, args): self.fmt = fmt self.args = args def __str__(self): return self.fmt.format(*self.args) self.logger._log(level, Msg(msg, args), (), **kw) @cl.contextmanager def start_stop_log(log, msg): m = msg + "..." log.info(m) print(m, end="") yield m += " done" log.info(m) print("\n" + m) log = Logger(__name__)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,435
quantapix/qnarre
refs/heads/main
/tools/triton/python/test/regression/test_functional_regressions.py
"""Functional regression tests for Triton kernels (require a CUDA device)."""

import numpy as np
import torch
from numpy.random import RandomState

import triton
import triton.language as tl


def test_chained_matmul():
    # Regression test for issue #1601: two chained tl.dot calls inside one
    # kernel, checked against a torch.einsum reference.

    def chained_matmul_reference(a, b, c):
        # Reference: (a @ b^T) @ c computed via einsum on the host.
        intermediate = torch.einsum('MK,NK->MN', a, b)
        return torch.einsum('MN,NK->MK', intermediate, c)

    @triton.jit
    def chained_matmul_kernel(
            A,  # shape: (m, k)
            B,  # shape: (n, k)
            C,  # shape: (n, k)
            out,  # shape: (m, k)
            m, n, k: tl.constexpr,
            block_m: tl.constexpr,
            block_n: tl.constexpr,
            block_k: tl.constexpr):
        # The kernel assumes the whole K dimension fits in one block.
        tl.static_assert(block_k == k,
                         f"expected block_k == k but got {block_k} != {k}")

        block_ix = tl.program_id(0)
        # Row-major offsets of this program's (block_m, block_k) tile of A.
        a_tile = (block_ix * block_m + tl.arange(0, block_m))[:, None] * block_k \
            + tl.arange(0, block_k)[None, :]

        a = tl.load(A + a_tile, mask=a_tile < m * k, other=0.0)

        acc = tl.zeros([block_m, block_k], dtype=tl.float32)

        # Walk the N dimension in block_n-sized steps; B and C share the same
        # tile layout, so one offset tensor serves both loads.
        for loop_block_start in range(0, n, block_n):
            bc_tile = (loop_block_start + tl.arange(0, block_n))[:, None] * block_k \
                + tl.arange(0, block_k)[None, :]
            b = tl.load(B + bc_tile, mask=bc_tile < n * k, other=0.0)

            # First matmul of the chain: a @ b^T -> (block_m, block_n).
            intermediate = tl.dot(a, tl.trans(b))
            # Zero out rows/cols that fall outside the logical (m, n) extent.
            intermediate_mask = ((loop_block_start + tl.arange(0, block_n)) < n)[None, :] \
                * (tl.arange(0, block_m) < m)[:, None]

            intermediate = tl.where(intermediate_mask, intermediate, 0.0)

            c = tl.load(C + bc_tile, mask=bc_tile < n * k)

            # Second matmul, accumulated over the N blocks.
            acc += tl.dot(intermediate.to(A.dtype.element_ty), c)

        tl.store(out + a_tile, acc.to(A.dtype.element_ty), mask=a_tile < m * k)

    m, n, k = 32, 64, 128
    block_m, block_n, block_k = 16, 32, k
    grid = (triton.cdiv(m, block_m),)
    # 0/1 inputs keep the fp16 products exact, so == comparison is valid.
    a = torch.randint(low=0, high=2, size=(m, k), dtype=torch.float16,
                      device='cuda')
    b = torch.randint(low=0, high=2, size=(n, k), dtype=torch.float16,
                      device='cuda')
    c = torch.randint_like(b, low=0, high=2)
    triton_result = torch.zeros_like(a)

    torch_result = chained_matmul_reference(a, b, c)
    chained_matmul_kernel[grid](a, b, c, triton_result, m, n, k,
                                block_m=block_m, block_n=block_n,
                                block_k=block_k)

    assert (torch_result == triton_result).all()


def test_vecmat():
    # Batched vector-matrix product: each row A[m] is reduced against the
    # matrix slab B[m], checked against a NumPy broadcast reference.

    @triton.jit
    def batched_vecmat(
            # inputs
            A,  # shape: [dim_m, dim_k]
            B,  # shape: [dim_m, dim_n, dim_k]
            # dimensions
            dim_m, dim_n, dim_k,
            # outputs
            output,
            # block information
            block_m: tl.constexpr, block_n: tl.constexpr,
            block_k: tl.constexpr):
        m_index = tl.program_id(0)
        n_index = tl.program_id(1)
        # Output tile: row-major offsets of this program's (block_m, block_n)
        # tile of the (dim_m, dim_n) result.
        output_tile = (m_index * block_m + tl.arange(0, block_m))[:, None] * dim_n \
            + (n_index * block_n + tl.arange(0, block_n))[None, :]

        vecmat = tl.zeros([block_m, block_n], dtype=A.dtype.element_ty)
        # Assumes dim_k is a multiple of block_k (any remainder is dropped).
        k_blocks = dim_k // block_k
        for k_index in range(k_blocks):
            # Load A tile
            a_tile = (m_index * block_m + tl.arange(0, block_m))[:, None] * dim_k \
                + (k_index * block_k + tl.arange(0, block_k))[None, :]
            a = tl.load(A + a_tile)

            # Load B tile, transposed to [n, m, k] in order to broadcast A on a
            # leading dimension.
            b_tile = (m_index * block_m + tl.arange(0, block_m))[None, :, None] * dim_n * dim_k \
                + (n_index * block_n + tl.arange(0, block_n))[:, None, None] * dim_k \
                + (k_index * block_k + tl.arange(0, block_k))[None, None, :]
            b = tl.load(B + b_tile)

            # Broadcast a against b's extra leading axis, reduce over K, and
            # transpose back to (m, n) before accumulating.
            expanded_a, _ = tl.broadcast(a, b)
            vecmat += tl.trans(tl.sum(expanded_a * b, axis=2))

        tl.store(output + output_tile, vecmat)

    M, N, K = 128, 128, 128
    block_m, block_n, block_k = 16, 32, 64

    # Fixed seed keeps the test deterministic across runs.
    rs = RandomState(17)
    A_vec = rs.randint(0, 4, (M, K)).astype('float32')
    B_vec = rs.randint(0, 4, (M, N, K)).astype('float32')
    A = A_vec
    B = B_vec

    A_tri = torch.tensor(A, device='cuda')
    B_tri = torch.tensor(B, device='cuda')
    C_tri = torch.zeros((M, N), dtype=torch.float32, device='cuda')

    grid = (M // block_m, N // block_n)
    batched_vecmat[grid](A_tri, B_tri, M, N, K, C_tri,
                         block_m=block_m, block_n=block_n, block_k=block_k,
                         num_warps=4, num_stages=1)

    # NumPy reference: broadcast A across N, multiply, reduce over K.
    A_expanded = A[:, np.newaxis, :]
    A_broadcasted = np.broadcast_to(A_expanded, (M, N, K))
    AB = A_broadcasted * B
    C_ref = np.sum(AB, axis=2)

    np.testing.assert_allclose(C_ref, C_tri.cpu().numpy(), rtol=0.01,
                               atol=1e-3)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,436
quantapix/qnarre
refs/heads/main
/qnarre/prep/convert/reformer.py
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Convert a pickled Trax Reformer checkpoint into a PyTorch state dict.

Walks the nested tuple/list structure of the Trax weights and copies each
tensor into the matching submodule of a freshly built ReformerModelWithLMHead.
"""

import pickle

import numpy as np
import torch

from argparse import ArgumentParser
from torch import nn
from transformers.utils import logging

from ..config.reformer import PreTrained
from ...models.reformer import ReformerModelWithLMHead

logging.set_verbosity_info()


def set_param(torch_layer, weight, bias=None):
    """Copy `weight` (and optionally `bias`) into `torch_layer`, shape-checked."""
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is None:
        return
    assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
    torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, d_hidden):
    """Load an LSH self-attention layer (shared query/key, value, output dense)."""
    # Trax stores per-head matrices; flatten the head dimensions into d_hidden.
    qk_np, v_np, out_np = (np.asarray(w) for w in weights[:3])
    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(qk_np).transpose(1, 2).contiguous().view(-1, d_hidden),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(v_np).transpose(1, 2).contiguous().view(-1, d_hidden),
    )
    # Output projection is transposed relative to the attention projections.
    set_param(
        torch_layer.output.dense,
        torch.tensor(out_np).view(-1, d_hidden).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, d_hidden):
    """Load a local self-attention layer (separate query, key, value, output dense)."""
    q_np, k_np, v_np, out_np = (np.asarray(w) for w in weights[:4])
    set_param(
        torch_layer.self_attention.query,
        torch.tensor(q_np).transpose(1, 2).contiguous().view(-1, d_hidden),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(k_np).transpose(1, 2).contiguous().view(-1, d_hidden),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(v_np).transpose(1, 2).contiguous().view(-1, d_hidden),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(out_np).view(-1, d_hidden).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, d_hidden):
    """Load one encoder block: attention layer-norm, attention, and feed-forward."""
    # Attention layer norm (weight, bias).
    ln_attn = weights[0][0][0]
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(np.asarray(ln_attn[0])),
        torch.tensor(np.asarray(ln_attn[1])),
    )

    # Fewer than 4 weight tensors means LSH attention (shared query/key);
    # otherwise it is local attention with separate query and key.
    attn = weights[0][1]
    if len(attn) < 4:
        set_layer_weights_in_torch_lsh(attn, torch_block.attention, d_hidden)
    else:
        set_layer_weights_in_torch_local(attn, torch_block.attention, d_hidden)

    # Feed-forward weights; some checkpoints nest them one level deeper.
    ff = weights[2][0][1][2]
    if len(ff) == 4:
        ff = ff[2]

    # Feed-forward layer norm.
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(np.asarray(ff[0][0])),
        torch.tensor(np.asarray(ff[0][1])),
    )
    # Intermediate dense: Trax keeps (in, out); PyTorch wants (out, in).
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(np.asarray(ff[1][0])).transpose(0, 1).contiguous(),
        torch.tensor(np.asarray(ff[1][1])),
    )
    # Output dense.
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(np.asarray(ff[4][0])).transpose(0, 1).contiguous(),
        torch.tensor(np.asarray(ff[4][1])),
    )


def load_src_weights(weights, torch_model, d_hidden):
    """Copy the full Trax weight tree into `torch_model` (a ReformerModelWithLMHead)."""
    reformer = torch_model.reformer

    # Word embeddings.
    set_param(
        reformer.embeddings.word_embeddings,
        torch.tensor(np.asarray(weights[1])),
    )

    # Axial position embeddings appear as a tuple of factorized tables.
    if isinstance(weights[3], tuple):
        pos_emb = reformer.embeddings.position_embeddings
        for i in range(len(pos_emb.weights)):
            table = np.asarray(weights[3][i][0])
            assert (
                pos_emb.weights[i].shape == table.shape
            ), f"{pos_emb[i]} emb does not match"
            pos_emb.weights[i] = nn.Parameter(torch.tensor(table))

    # Each HF encoder layer consumes 4 consecutive Trax weight entries.
    trax_layers = weights[5]
    assert len(reformer.encoder.layers) * 4 == len(
        trax_layers
    ), "HF and trax model do not have the same number of layers"
    for i, layer in enumerate(reformer.encoder.layers):
        set_block_weights_in_torch(trax_layers[4 * i : 4 * (i + 1)], layer, d_hidden)

    # Final encoder layer norm.
    set_param(
        reformer.encoder.layer_norm,
        torch.tensor(np.asarray(weights[7][0])),
        torch.tensor(np.asarray(weights[7][1])),
    )

    # LM head decoder (tied output embedding), transposed for PyTorch layout.
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(np.asarray(weights[9][0])).transpose(0, 1).contiguous(),
        torch.tensor(np.asarray(weights[9][1])),
    )


def to_pytorch(src_path, cfg_path, save_path):
    """Build a model from `cfg_path`, fill it from the pickle at `src_path`, save it."""
    cfg = PreTrained.from_json_file(cfg_path)
    print(f"Building from config: {cfg}")
    m = ReformerModelWithLMHead(cfg)
    # NOTE(review): pickle.load on an externally supplied checkpoint executes
    # arbitrary code if the file is untrusted — only convert trusted checkpoints.
    with open(src_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    load_src_weights(model_weights, m, cfg.d_hidden)
    print(f"Saving to: {save_path}")
    torch.save(m.state_dict(), save_path)


if __name__ == "__main__":
    x = ArgumentParser()
    x.add_argument("--src_path", default=None, type=str, required=True)
    x.add_argument("--cfg_path", default=None, type=str, required=True)
    x.add_argument("--save_path", default=None, type=str, required=True)
    y = x.parse_args()
    to_pytorch(y.src_path, y.cfg_path, y.save_path)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,437
quantapix/qnarre
refs/heads/main
/qnarre/prep/tokens/perceiver.py
# Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= from ...tokens.utils import AddedToken, PreTrainedTokenizer class Tokenizer(PreTrainedTokenizer): model_input_names = ["input_ids", "attention_mask"] def __init__( self, pad="[PAD]", bos="[BOS]", eos="[EOS]", msk="[MASK]", cls="[CLS]", sep="[SEP]", model_max_length=2048, **kw, ): pad = AddedToken(pad, lstrip=False, rstrip=False) if isinstance(pad, str) else pad bos = AddedToken(bos, lstrip=False, rstrip=False) if isinstance(bos, str) else bos eos = AddedToken(eos, lstrip=False, rstrip=False) if isinstance(eos, str) else eos msk = AddedToken(msk, lstrip=False, rstrip=False) if isinstance(msk, str) else msk cls = AddedToken(cls, lstrip=False, rstrip=False) if isinstance(cls, str) else cls sep = AddedToken(sep, lstrip=False, rstrip=False) if isinstance(sep, str) else sep super().__init__( pad=pad, bos=bos, eos=eos, msk=msk, cls=cls, sep=sep, model_max_length=model_max_length, **kw, ) self._utf_vocab_size = 2**8 self.special_tokens_encoder = { self.pad: 0, self.bos: 1, self.eos: 2, self.msk: 3, self.cls: 4, self.sep: 5, } self._num_special_tokens = len(self.special_tokens_encoder) self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()} def get_vocab(self): vocab = self.special_tokens_encoder.copy() vocab.update(self.added_tokens_encoder) for i in range(self._utf_vocab_size): 
token = chr(i) vocab[token] = i + len(self.special_tokens_encoder) return vocab @property def s_vocab(self): return self._utf_vocab_size + self._num_special_tokens def get_special_tokens_mask( self, toks_0, toks_1=None, has_specials=False, ): if has_specials: return super().get_special_tokens_mask(toks_0=toks_0, toks_1=toks_1, has_specials=True) if toks_1 is None: return [1] + [0] * len(toks_0) + [1] return [1] + ([0] * len(toks_0)) + [1] + ([0] * len(toks_1)) + [1] def build_inputs_with_special_tokens(self, toks_0, toks_1=None): if toks_1 is None: return [self.cls_token_id] + toks_0 + [self.sep_token_id] else: return [self.cls_token_id] + toks_0 + [self.sep_token_id] + toks_1 + [self.sep_token_id] def _tokenize(self, text): tokens = [chr(i) for i in text.encode("utf-8")] return tokens def _convert_token_to_id(self, token): if token in self.special_tokens_encoder: token_id = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: token_id = self.added_tokens_encoder[token] elif len(token) != 1: token_id = self.unk_token_id else: token_id = ord(token) + self._num_special_tokens return token_id def _convert_id_to_token(self, index): if index in self.special_tokens_decoder: token = self.special_tokens_decoder[index] elif index in self.added_tokens_decoder: token = self.added_tokens_decoder[index] else: token = chr(index - self._num_special_tokens) return token def convert_tokens_to_string(self, tokens): bstring = b"" for token in tokens: if token in self.special_tokens_decoder: tok_string = self.special_tokens_decoder[token].encode("utf-8") elif token in self.added_tokens_decoder: tok_string = self.special_tokens_decoder[token].encode("utf-8") elif token in self.special_tokens_encoder: tok_string = token.encode("utf-8") elif token in self.added_tokens_encoder: tok_string = token.encode("utf-8") else: tok_string = bytes([ord(token)]) bstring += tok_string string = bstring.decode("utf-8", errors="replace") return string def save_vocabulary(self, dir, 
pre=None): return ()
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,438
quantapix/qnarre
refs/heads/main
/tools/triton/python/triton/debugger/core.py
from typing import Tuple import dataclasses @dataclasses.dataclass class ExecutionContext: program_id: Tuple[int] program_size: Tuple[int]
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,439
quantapix/qnarre
refs/heads/main
/qnarre/core/modeling_utils.py
import os import re from contextlib import contextmanager from dataclasses import dataclass from functools import partial import psutil import torch from torch import nn from torch.nn import CrossEntropyLoss from requests import HTTPError from .activations import get_activation from .configuration_utils import PretrainedConfig from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled from .dynamic_module_utils import custom_object_save from .file_utils import ( DUMMY_INPUTS, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_NAME, EntryNotFoundError, ModelOutput, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, cached_path, copy_func, has_file, hf_bucket_url, is_offline_mode, is_remote_url, ) from .generation_utils import GenerationMixin from .utils import logging from .utils.versions import require_version_core logger = logging.get_logger(__name__) _init_weights = True @contextmanager def no_init_weights(_enable=True): global _init_weights if _enable: _init_weights = False try: yield finally: _init_weights = True def get_parameter_device(x): try: return next(x.parameters()).device except StopIteration: def find_tensor_attributes(x): return [(k, v) for k, v in x.__dict__.items() if torch.is_tensor(v)] gen = x._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].device class ModuleUtilsMixin: @staticmethod def _hook_rss_memory_pre_forward(module, *args, **kw): process = psutil.Process(os.getpid()) mem = process.memory_info() module.mem_rss_pre_forward = mem.rss return None @staticmethod def _hook_rss_memory_post_forward(module, *args, **kw): process = psutil.Process(os.getpid()) mem = process.memory_info() module.mem_rss_post_forward = mem.rss mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward module.mem_rss_diff = mem_rss_diff + ( module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0 ) return None def add_memory_hooks(self): for module in self.modules(): 
module.register_forward_pre_hook(self._hook_rss_memory_pre_forward) module.register_forward_hook(self._hook_rss_memory_post_forward) self.reset_memory_hooks_state() def reset_memory_hooks_state(self): for module in self.modules(): module.mem_rss_diff = 0 module.mem_rss_post_forward = 0 module.mem_rss_pre_forward = 0 @property def device(self): return get_parameter_device(self) @property def dtype(self): return get_parameter_dtype(self) def num_parameters(self, only_trainable=False, exclude_embeddings=False): if exclude_embeddings: embedding_param_names = [ f"{name}.weight" for name, module_type in self.named_modules() if isinstance(module_type, nn.Embedding) ] non_embedding_parameters = [ parameter for name, parameter in self.named_parameters() if name not in embedding_param_names ] return sum( p.numel() for p in non_embedding_parameters if p.requires_grad or not only_trainable ) else: return sum( p.numel() for p in self.parameters() if p.requires_grad or not only_trainable ) def estimate_tokens(self, input_dict): if self.main_input_name in input_dict: return input_dict[self.main_input_name].numel() else: logger.warning( "Could not estimate the number of tokens of the input, floating-point operations will not be computed" ) return 0 def floating_point_ops(self, input_dict, exclude_embeddings=True): return ( 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings) ) class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMixin): config_class = None base_model_prefix = "" main_input_name = "input_ids" _auto_class = None # a list of re pattern of tensor names to ignore from the model when loading the model weights # (and avoid unnecessary warnings). _keys_to_ignore_on_load_missing = None # a list of re pattern of tensor names to ignore from the weights when loading the model weights # (and avoid unnecessary warnings). 
_keys_to_ignore_on_load_unexpected = None # a list of of tensor names to ignore when saving the model (useful for keys that aren't # trained, but which are deterministic, or tied variables) _keys_to_ignore_on_save = None is_parallelizable = False grad_checkpoint = False @property def dummy_inputs(self): """ `Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network. """ return {"input_ids": torch.tensor(DUMMY_INPUTS)} @property def framework(self): """ :str: Identifies that this is a PyTorch model. """ return "pt" def __init__(self, config, *inputs, **kw): super().__init__() if not isinstance(config, PretrainedConfig): raise ValueError( f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class " "`PretrainedConfig`. To create a model from a pretrained model use " f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`" ) # Save config and origin of the pretrained weights if given in model self.config = config self.name_or_path = config.name_or_path def post_init(self): self.init_weights() self._backward_compatibility_grad_checkpoint() def _backward_compatibility_grad_checkpoint(self): if self.grad_checkpoint and getattr(self.config, "grad_checkpoint", False): self.grad_checkpoint_enable() # Remove the attribute now that is has been consumed, so it's no saved in the config. 
delattr(self.config, "grad_checkpoint") @classmethod def _from_config(cls, config, **kw): torch_dtype = kw.pop("torch_dtype", None) # override default dtype if needed dtype_orig = None if torch_dtype is not None: dtype_orig = cls._set_default_torch_dtype(torch_dtype) if is_deepspeed_zero3_enabled(): import deepspeed logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model") # this immediately partitions the model across all gpus, to avoid the overhead in time # and memory copying it on CPU or each GPU first with deepspeed.zero.Init(config_dict_or_path=deepspeed_config()): model = cls(config, **kw) else: model = cls(config, **kw) # restore default dtype if it was modified if dtype_orig is not None: torch.set_default_dtype(dtype_orig) return model @classmethod def _set_default_torch_dtype(cls, dtype: torch.dtype): if not dtype.is_floating_point: raise ValueError( f"Can't instantiate {cls.__name__} model under dtype={dtype} since it is not a floating point dtype" ) logger.info(f"Instantiating {cls.__name__} model under default dtype {dtype}.") dtype_orig = torch.get_default_dtype() torch.set_default_dtype(dtype) return dtype_orig @property def base_model(self): return getattr(self, self.base_model_prefix, self) def get_input_embeddings(self): base_model = getattr(self, self.base_model_prefix, self) if base_model is not self: return base_model.get_input_embeddings() else: raise NotImplementedError def set_input_embeddings(self, value): base_model = getattr(self, self.base_model_prefix, self) if base_model is not self: base_model.set_input_embeddings(value) else: raise NotImplementedError def get_output_embeddings(self): return None def _init_weights(self, module): raise NotImplementedError(f"Make sure `_init_weights` is implemented for {self.__class__}") def tie_weights(self): output_embeddings = self.get_output_embeddings() if output_embeddings is not None and getattr(self.config, "tie_word_embeddings", True): 
self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings()) if getattr(self.config, "is_enc_dec", False) and getattr( self.config, "tie_encoder_decoder", False ): if hasattr(self, self.base_model_prefix): self = getattr(self, self.base_model_prefix) self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix) for module in self.modules(): if hasattr(module, "_tie_weights"): module._tie_weights() @staticmethod def _tie_encoder_decoder_weights(encoder, decoder, base_model_prefix): uninitialized_encoder_weights = [] if decoder.__class__ != encoder.__class__: logger.info( f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder weights are correctly initialized." ) def tie_encoder_to_decoder_recursively( decoder_pointer, encoder_pointer, module_name, uninitialized_encoder_weights, depth=0, ): assert isinstance(decoder_pointer, nn.Module) and isinstance( encoder_pointer, nn.Module ), f"{decoder_pointer} and {encoder_pointer} have to be of type nn.Module" if hasattr(decoder_pointer, "weight"): assert hasattr(encoder_pointer, "weight") encoder_pointer.weight = decoder_pointer.weight if hasattr(decoder_pointer, "bias"): assert hasattr(encoder_pointer, "bias") encoder_pointer.bias = decoder_pointer.bias return encoder_modules = encoder_pointer._modules decoder_modules = decoder_pointer._modules if len(decoder_modules) > 0: assert ( len(encoder_modules) > 0 ), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}" all_encoder_weights = set( [module_name + "/" + sub_name for sub_name in encoder_modules.keys()] ) encoder_layer_pos = 0 for name, module in decoder_modules.items(): if name.isdigit(): encoder_name = str(int(name) + encoder_layer_pos) decoder_name = name if not isinstance( decoder_modules[decoder_name], type(encoder_modules[encoder_name]) ) and len(encoder_modules) != len(decoder_modules): # this can happen if the name corresponds to the position in a 
list module list of layers # in this case the decoder has added a cross-attention that the encoder does not have # thus skip this step and subtract one layer pos from encoder encoder_layer_pos -= 1 continue elif name not in encoder_modules: continue elif depth > 500: raise ValueError( "Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model." ) else: decoder_name = encoder_name = name tie_encoder_to_decoder_recursively( decoder_modules[decoder_name], encoder_modules[encoder_name], module_name + "/" + name, uninitialized_encoder_weights, depth=depth + 1, ) all_encoder_weights.remove(module_name + "/" + encoder_name) uninitialized_encoder_weights += list(all_encoder_weights) # tie weights recursively tie_encoder_to_decoder_recursively( decoder, encoder, base_model_prefix, uninitialized_encoder_weights ) if len(uninitialized_encoder_weights) > 0: logger.warning( f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}" ) def _tie_or_clone_weights(self, output_embeddings, input_embeddings): """Tie or clone module weights depending of whether we are using TorchScript or not""" if self.config.torchscript: output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone()) else: output_embeddings.weight = input_embeddings.weight if getattr(output_embeddings, "bias", None) is not None: output_embeddings.bias.data = nn.functional.pad( output_embeddings.bias.data, ( 0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0], ), "constant", 0, ) if hasattr(output_embeddings, "out_features") and hasattr( input_embeddings, "num_embeddings" ): output_embeddings.out_features = input_embeddings.num_embeddings def resize_token_embeddings(self, new_num_tokens=None): model_embeds = self._resize_token_embeddings(new_num_tokens) if new_num_tokens is None: return model_embeds # Update base model and current model config 
self.config.s_vocab = new_num_tokens self.s_vocab = new_num_tokens # Tie weights again if needed self.tie_weights() return model_embeds def _resize_token_embeddings(self, new_num_tokens): old_embeddings = self.get_input_embeddings() new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) self.set_input_embeddings(new_embeddings) # if word embeddings are not tied, make sure that lm head is resized as well if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings: old_lm_head = self.get_output_embeddings() new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens) self.set_output_embeddings(new_lm_head) return self.get_input_embeddings() def _get_resized_embeddings(self, old_embeddings: nn.Embedding, new_num_tokens=None): if new_num_tokens is None: return old_embeddings if is_deepspeed_zero3_enabled(): import deepspeed with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=None): old_num_tokens, old_embedding_dim = old_embeddings.weight.size() else: old_num_tokens, old_embedding_dim = old_embeddings.weight.size() if old_num_tokens == new_num_tokens: return old_embeddings if not isinstance(old_embeddings, nn.Embedding): raise TypeError( f"Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}. " f"You should either use a different resize function or make sure that `old_embeddings` are an instance of {nn.Embedding}." 
            )

        # Build new embeddings
        new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
        # nn.Module.to(...) moves the module in place, so no reassignment is needed here
        new_embeddings.to(self.device, dtype=old_embeddings.weight.dtype)

        # initialize all new embeddings (in particular added tokens)
        self._init_weights(new_embeddings)

        # Copy token embeddings from the previous weights

        # numbers of tokens to copy
        n = min(old_num_tokens, new_num_tokens)
        if is_deepspeed_zero3_enabled():
            import deepspeed

            # ZeRO-3: only rank 0 sees the gathered weight, so only it performs the copy.
            with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=0):
                if torch.distributed.get_rank() == 0:
                    new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]
        else:
            new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]

        return new_embeddings

    def _get_resized_lm_head(
        self,
        old_lm_head: nn.Linear,
        new_num_tokens=None,
        transposed=False,
    ):
        # Build a resized copy of the LM head. `transposed` flips which weight axis
        # holds the vocab dimension (e.g. for Conv1D-style heads).
        if new_num_tokens is None:
            return old_lm_head

        if is_deepspeed_zero3_enabled():
            import deepspeed

            with deepspeed.zero.GatheredParameters(old_lm_head.weight, modifier_rank=None):
                old_num_tokens, old_lm_head_dim = (
                    old_lm_head.weight.size()
                    if not transposed
                    else old_lm_head.weight.t().size()
                )
        else:
            old_num_tokens, old_lm_head_dim = (
                old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()
            )

        if old_num_tokens == new_num_tokens:
            return old_lm_head

        if not isinstance(old_lm_head, nn.Linear):
            raise TypeError(
                f"Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}. "
                f"You should either use a different resize function or make sure that `old_lm_head` are an instance of {nn.Linear}."
            )

        # Build new lm head
        new_lm_head_shape = (
            (old_lm_head_dim, new_num_tokens)
            if not transposed
            else (new_num_tokens, old_lm_head_dim)
        )
        has_new_lm_head_bias = old_lm_head.bias is not None
        new_lm_head = nn.Linear(*new_lm_head_shape, bias=has_new_lm_head_bias)
        new_lm_head = new_lm_head.to(self.device, dtype=old_lm_head.weight.dtype)

        # initialize new lm head (in particular added tokens)
        self._init_weights(new_lm_head)

        num_tokens_to_copy = min(old_num_tokens, new_num_tokens)

        # XXX: put the long block of code in a wrapper
        if is_deepspeed_zero3_enabled():
            import deepspeed

            # ZeRO-3: gather the partitioned weight; only rank 0 performs the copy.
            with deepspeed.zero.GatheredParameters(old_lm_head.weight, modifier_rank=0):
                if torch.distributed.get_rank() == 0:
                    # Copy old lm head weights to new lm head
                    if not transposed:
                        new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[
                            :num_tokens_to_copy, :
                        ]
                    else:
                        new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[
                            :, :num_tokens_to_copy
                        ]

                    # Copy bias weights to new lm head
                    if has_new_lm_head_bias:
                        new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[
                            :num_tokens_to_copy
                        ]
        else:
            # Copy old lm head weights to new lm head
            if not transposed:
                new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[
                    :num_tokens_to_copy, :
                ]
            else:
                new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[
                    :, :num_tokens_to_copy
                ]

            # Copy bias weights to new lm head
            if has_new_lm_head_bias:
                new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[
                    :num_tokens_to_copy
                ]

        return new_lm_head

    def resize_position_embeddings(self, new_num_position_embeddings):
        # Hook for subclasses with resizable position embeddings; base class only raises.
        raise NotImplementedError(
            f"`resize_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should "
            f"overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`"
        )

    def get_position_embeddings(self):
        # Hook for subclasses that expose their position embeddings; base class only raises.
        raise NotImplementedError(
            f"`get_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should "
            f"overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`"
        )

    def init_weights(self):
        # Prune heads recorded in the config, then (when the module-level _init_weights
        # flag allows) initialize all weights and tie them.
        if self.config.pruned_heads:
            self.prune_heads(self.config.pruned_heads)

        if _init_weights:
            # Initialize weights
            self.apply(self._init_weights)

            # Tie weights should be skipped when not initializing all weights
            # since from_pretrained(...) calls tie weights anyways
            self.tie_weights()

    def prune_heads(self, heads_to_prune: Dict[int, List[int]]):
        # Record pruned heads in the config (merged with any already-pruned heads)
        # and delegate the actual pruning to the base model.
        for layer, heads in heads_to_prune.items():
            union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
            self.config.pruned_heads[layer] = list(
                union_heads
            )  # Unfortunately we have to store it as list for JSON

        self.base_model._prune_heads(heads_to_prune)

    def grad_checkpoint_enable(self):
        # Enable gradient checkpointing on every supporting submodule.
        if not self.grad_checkpoint:
            raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.")
        self.apply(partial(self._set_grad_checkpoint, value=True))

    def grad_checkpoint_disable(self):
        # Disable gradient checkpointing; silently a no-op when unsupported.
        if self.grad_checkpoint:
            self.apply(partial(self._set_grad_checkpoint, value=False))

    @property
    def is_grad_checkpoint(self):
        # True if any submodule currently has gradient checkpointing enabled.
        return any(hasattr(m, "grad_checkpoint") and m.grad_checkpoint for m in self.modules())

    def save_pretrained(
        self,
        save_directory,
        save_config=True,
        state_dict=None,
        save_function=torch.save,
        push_to_hub=False,
        **kw,
    ):
        # Serialize model weights (and optionally the config) into `save_directory`
        # in the layout `from_pretrained` reloads; optionally push the result to the hub.
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        if push_to_hub:
            commit_message = kw.pop("commit_message", None)
            repo = self._create_or_get_repo(save_directory, **kw)

        os.makedirs(save_directory, exist_ok=True)

        # Only save the model itself if we are using distributed training
        model_to_save = unwrap_model(self)

        # save the string version of dtype to the config, e.g.
        # convert torch.float32 => "float32"
        # we currently don't use this setting automatically, but may start to use with v5
        dtype = get_parameter_dtype(model_to_save)
        model_to_save.config.torch_dtype = str(dtype).split(".")[1]

        # Attach architecture to the config
        model_to_save.config.archs = [model_to_save.__class__.__name__]

        # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
        # loaded from the Hub.
        if self._auto_class is not None:
            custom_object_save(self, save_directory, config=self.config)

        # Save the config
        if save_config:
            model_to_save.config.save_pretrained(save_directory)

        # Save the model
        if state_dict is None:
            state_dict = model_to_save.state_dict()

        # Handle the case where some state_dict keys shouldn't be saved
        if self._keys_to_ignore_on_save is not None:
            for ignore_key in self._keys_to_ignore_on_save:
                if ignore_key in state_dict.keys():
                    del state_dict[ignore_key]

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
        save_function(state_dict, output_model_file)

        logger.info(f"Model weights saved in {output_model_file}")

        if push_to_hub:
            url = self._push_to_hub(repo, commit_message=commit_message)
            logger.info(f"Model pushed to the hub in this commit: {url}")

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kw):
        # Instantiate a model from a local directory/file or a remote identifier and
        # load its weights from a PyTorch, TF 1.x/2.x, or Flax checkpoint.
        # All loading options are consumed from **kw so the remainder can be
        # forwarded to the config/model constructors.
        config = kw.pop("config", None)
        state_dict = kw.pop("state_dict", None)
        cache_dir = kw.pop("cache_dir", None)
        from_tf = kw.pop("from_tf", False)
        from_flax = kw.pop("from_flax", False)
        ignore_mismatched_sizes = kw.pop("ignore_mismatched_sizes", False)
        force_download = kw.pop("force_download", False)
        resume_download = kw.pop("resume_download", False)
        proxies = kw.pop("proxies", None)
        output_loading_info = kw.pop("output_loading_info", False)
        local_files_only = kw.pop("local_files_only", False)
        use_auth_token = kw.pop("use_auth_token", None)
        revision = kw.pop("revision", None)
        mirror = \
            kw.pop("mirror", None)
        from_pipeline = kw.pop("_from_pipeline", None)
        from_auto_class = kw.pop("_from_auto", False)
        _fast_init = kw.pop("_fast_init", True)
        torch_dtype = kw.pop("torch_dtype", None)
        low_cpu_mem_usage = kw.pop("low_cpu_mem_usage", False)

        # PyTorch is the default source unless a TF or Flax checkpoint was requested
        from_pt = not (from_tf | from_flax)

        user_agent = {
            "file_type": "model",
            "framework": "pytorch",
            "from_auto_class": from_auto_class,
        }
        if from_pipeline is not None:
            user_agent["using_pipeline"] = from_pipeline

        if is_offline_mode() and not local_files_only:
            logger.info("Offline mode: forcing local_files_only=True")
            local_files_only = True

        # Load config if we don't provide a configuration
        if not isinstance(config, PretrainedConfig):
            config_path = config if config is not None else pretrained_model_name_or_path
            config, model_kw = cls.config_class.from_pretrained(
                config_path,
                cache_dir=cache_dir,
                return_unused_kw=True,
                force_download=force_download,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                _from_auto=from_auto_class,
                _from_pipeline=from_pipeline,
                **kw,
            )
        else:
            # caller supplied a ready config: all remaining kwargs go to the model
            model_kw = kw

        # Load model
        if pretrained_model_name_or_path is not None:
            pretrained_model_name_or_path = str(pretrained_model_name_or_path)
            if os.path.isdir(pretrained_model_name_or_path):
                if from_tf and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
                ):
                    # Load from a TF 1.0 checkpoint in priority if from_tf
                    archive_file = os.path.join(
                        pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index"
                    )
                elif from_tf and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
                ):
                    # Load from a TF 2.0 checkpoint in priority if from_tf
                    archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
                elif from_flax and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)
                ):
                    # Load from a Flax checkpoint in priority if from_flax
                    archive_file = os.path.join(pretrained_model_name_or_path,
FLAX_WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): # Load from a PyTorch checkpoint archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) # At this stage we don't have a weight file so we will raise an error. elif os.path.isfile( os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index") ) or os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)): raise EnvironmentError( f"Error no file named {WEIGHTS_NAME} found in directory {pretrained_model_name_or_path} but " "there is a file for TensorFlow weights. Use `from_tf=True` to load this model from those " "weights." ) elif os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME): raise EnvironmentError( f"Error no file named {WEIGHTS_NAME} found in directory {pretrained_model_name_or_path} but " "there is a file for Flax weights. Use `from_flax=True` to load this model from those " "weights." ) else: raise EnvironmentError( f"Error no file named {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME + '.index'} or " f"{FLAX_WEIGHTS_NAME} found in directory {pretrained_model_name_or_path}." ) elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url( pretrained_model_name_or_path ): archive_file = pretrained_model_name_or_path elif os.path.isfile(pretrained_model_name_or_path + ".index"): if not from_tf: raise ValueError( f"We found a TensorFlow checkpoint at {pretrained_model_name_or_path + '.index'}, please set " "from_tf to True to load from this checkpoint." 
                    )
                archive_file = pretrained_model_name_or_path + ".index"
            else:
                # Remote model identifier: build the hub URL for the right framework's file.
                # set correct filename
                if from_tf:
                    filename = TF2_WEIGHTS_NAME
                elif from_flax:
                    filename = FLAX_WEIGHTS_NAME
                else:
                    filename = WEIGHTS_NAME

                archive_file = hf_bucket_url(
                    pretrained_model_name_or_path,
                    filename=filename,
                    revision=revision,
                    mirror=mirror,
                )

            try:
                # Load from URL or cache if already cached
                resolved_archive_file = cached_path(
                    archive_file,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                )
            except RepositoryNotFoundError:
                raise EnvironmentError(
                    f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                    "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                    "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                    "login` and pass `use_auth_token=True`."
                )
            except RevisionNotFoundError:
                raise EnvironmentError(
                    f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                    "this model name. Check the model page at "
                    f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
                )
            except EntryNotFoundError:
                # The repo exists but the requested file does not; give a framework-specific hint.
                if filename == WEIGHTS_NAME:
                    has_file_kw = {
                        "revision": revision,
                        "mirror": mirror,
                        "proxies": proxies,
                        "use_auth_token": use_auth_token,
                    }
                    if has_file(pretrained_model_name_or_path, TF2_WEIGHTS_NAME, **has_file_kw):
                        raise EnvironmentError(
                            f"{pretrained_model_name_or_path} does not appear to have a file named {WEIGHTS_NAME} but "
                            "there is a file for TensorFlow weights. Use `from_tf=True` to load this model from those "
                            "weights."
                        )
                    elif has_file(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME, **has_file_kw):
                        raise EnvironmentError(
                            f"{pretrained_model_name_or_path} does not appear to have a file named {WEIGHTS_NAME} but "
                            "there is a file for Flax weights. Use `from_flax=True` to load this model from those "
                            "weights."
                        )
                    else:
                        raise EnvironmentError(
                            f"{pretrained_model_name_or_path} does not appear to have a file named {WEIGHTS_NAME}, "
                            f"{TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or {FLAX_WEIGHTS_NAME}."
                        )
                else:
                    # NOTE(review): "(unknown)" looks like a placeholder — upstream interpolates
                    # {filename} here; confirm the intended message before release.
                    raise EnvironmentError(
                        f"{pretrained_model_name_or_path} does not appear to have a file named (unknown)."
                    )
            except HTTPError:
                # NOTE(review): "conaining a a file" is a typo ("containing a file") carried
                # over from upstream; left byte-identical here.
                raise EnvironmentError(
                    "We couldn't connect to 'https://huggingface.co/' to load this model and it looks like "
                    f"{pretrained_model_name_or_path} is not the path to a directory conaining a a file named "
                    f"{WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or {FLAX_WEIGHTS_NAME}.\n"
                    "Checkout your internet connection or see how to run the library in offline mode at "
                    "'https://huggingface.co/docs/transformers/installation#offline-mode'."
                )
            except EnvironmentError:
                raise EnvironmentError(
                    f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                    "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                    f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                    f"containing a file named {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or "
                    f"{FLAX_WEIGHTS_NAME}."
                )

            if resolved_archive_file == archive_file:
                logger.info(f"loading weights file {archive_file}")
            else:
                logger.info(
                    f"loading weights file {archive_file} from cache at {resolved_archive_file}"
                )
        else:
            resolved_archive_file = None

        # load pt weights early so that we know which dtype to init the model under
        if from_pt:
            if state_dict is None:
                try:
                    state_dict = torch.load(resolved_archive_file, map_location="cpu")
                except Exception as e:
                    # Distinguish a git-lfs pointer file from a genuinely corrupt checkpoint.
                    try:
                        with open(resolved_archive_file) as f:
                            if f.read().startswith("version"):
                                raise OSError(
                                    "You seem to have cloned a repository without having git-lfs installed. Please install "
                                    "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder "
                                    "you cloned."
                                )
                            else:
                                raise ValueError from e
                    except (UnicodeDecodeError, ValueError):
                        raise OSError(
                            f"Unable to load weights from pytorch checkpoint file for '{pretrained_model_name_or_path}' "
                            f"at '{resolved_archive_file}'. "
                            "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True."
                        )

            # set dtype to instantiate the model under:
            # 1. If torch_dtype is not None, we use that dtype
            # 2. If torch_dtype is "auto", we auto-detect dtype from the loaded state_dict, by checking its first
            #    weights entry - we assume all weights are of the same dtype
            # we also may have config.torch_dtype available, but we won't rely on it till v5
            dtype_orig = None
            if torch_dtype is not None:
                if isinstance(torch_dtype, str):
                    if torch_dtype == "auto":
                        torch_dtype = next(iter(state_dict.values())).dtype
                    else:
                        raise ValueError(
                            f"`torch_dtype` can be either a `torch.dtype` or `auto`, but received {torch_dtype}"
                        )
                dtype_orig = cls._set_default_torch_dtype(torch_dtype)

            if low_cpu_mem_usage:
                # save the keys
                loaded_state_dict_keys = [k for k in state_dict.keys()]
                del state_dict  # free CPU memory - will reload again later

        config.name_or_path = pretrained_model_name_or_path

        # Instantiate model.
        if is_deepspeed_zero3_enabled():
            import deepspeed

            logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
            # this immediately partitions the model across all gpus, to avoid the overhead in time
            # and memory copying it on CPU or each GPU first
            with deepspeed.zero.Init(config_dict_or_path=deepspeed_config()):
                # no_init_weights skips expensive random init when _fast_init is on
                with no_init_weights(_enable=_fast_init):
                    model = cls(config, *model_args, **model_kw)
        else:
            with no_init_weights(_enable=_fast_init):
                model = cls(config, *model_args, **model_kw)

        if from_pt:
            # restore default dtype
            if dtype_orig is not None:
                torch.set_default_dtype(dtype_orig)

        if from_tf:
            if resolved_archive_file.endswith(".index"):
                # Load from a TensorFlow 1.X checkpoint - provided by original authors
                model = cls.load_tf_weights(
                    model, config, resolved_archive_file[:-6]
                )  # Remove the '.index'
            else:
                # Load from our TensorFlow 2.0 checkpoints
                try:
                    from .modeling_tf_pytorch_utils import load_tf2_checkpoint_in_pytorch_model

                    model = load_tf2_checkpoint_in_pytorch_model(
                        model, resolved_archive_file, allow_missing_keys=True
                    )
                except ImportError:
                    logger.error(
                        "Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
                        "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
                    )
                    raise
        elif from_flax:
            try:
                from .modeling_flax_pytorch_utils import load_flax_checkpoint_in_pytorch_model

                model = load_flax_checkpoint_in_pytorch_model(model, resolved_archive_file)
            except ImportError:
                logger.error(
                    "Loading a Flax model in PyTorch, requires both PyTorch and Flax to be installed. Please see "
                    "https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation instructions."
                )
                raise
        elif from_pt:
            if low_cpu_mem_usage:
                cls._load_state_dict_into_model_low_mem(
                    model, loaded_state_dict_keys, resolved_archive_file
                )
            else:
                (
                    model,
                    missing_keys,
                    unexpected_keys,
                    mismatched_keys,
                    error_msgs,
                ) = cls._load_state_dict_into_model(
                    model,
                    state_dict,
                    pretrained_model_name_or_path,
                    ignore_mismatched_sizes=ignore_mismatched_sizes,
                    _fast_init=_fast_init,
                )

        # make sure token embedding weights are still tied if needed
        model.tie_weights()

        # Set model in evaluation mode to deactivate DropOut modules by default
        model.eval()

        if output_loading_info:
            # NOTE(review): missing_keys & friends are only bound on the plain-PyTorch
            # loading path above; requesting loading info together with from_tf/from_flax
            # or low_cpu_mem_usage would raise NameError here — confirm intended behavior.
            loading_info = {
                "missing_keys": missing_keys,
                "unexpected_keys": unexpected_keys,
                "mismatched_keys": mismatched_keys,
                "error_msgs": error_msgs,
            }
            return model, loading_info

        return model

    @classmethod
    def _load_state_dict_into_model(
        cls,
        model,
        state_dict,
        pretrained_model_name_or_path,
        ignore_mismatched_sizes=False,
        _fast_init=True,
    ):
        # Copy `state_dict` into `model`, reconciling legacy key names and
        # base-model-prefix differences; returns the model plus key diagnostics.

        # Convert old format to new format if needed from a PyTorch state_dict
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if "gamma" in key:
                new_key = key.replace("gamma", "weight")
            if "beta" in key:
                new_key = key.replace("beta", "bias")
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)

        # Retrieve missing & unexpected_keys
        model_state_dict = model.state_dict()
        expected_keys = list(model_state_dict.keys())
        loaded_keys = list(state_dict.keys())
        prefix = model.base_model_prefix

        has_prefix_module = any(s.startswith(prefix) for s in loaded_keys)
        expects_prefix_module = any(s.startswith(prefix) for s in expected_keys)

        # key re-naming operations are never done on the keys
        # that are loaded, but always on the keys of the newly initialized model
        remove_prefix_from_model = not has_prefix_module and expects_prefix_module
        add_prefix_to_model = has_prefix_module and not expects_prefix_module

        if remove_prefix_from_model:
            expected_keys_not_prefixed = [s for s in
                                          expected_keys if not s.startswith(prefix)]
            expected_keys = [
                ".".join(s.split(".")[1:]) if s.startswith(prefix) else s for s in expected_keys
            ]
        elif add_prefix_to_model:
            expected_keys = [".".join([prefix, s]) for s in expected_keys]

        missing_keys = list(set(expected_keys) - set(loaded_keys))
        unexpected_keys = list(set(loaded_keys) - set(expected_keys))

        # Mistmatched keys contains tuples key/shape1/shape2 of weights in the checkpoint that have a shape not
        # matching the weights in the model.
        mismatched_keys = []
        if ignore_mismatched_sizes:
            for checkpoint_key in loaded_keys:
                model_key = checkpoint_key
                if remove_prefix_from_model:
                    # The model key starts with `prefix` but `checkpoint_key` doesn't so we add it.
                    model_key = f"{prefix}.{checkpoint_key}"
                elif add_prefix_to_model:
                    # The model key doesn't start with `prefix` but `checkpoint_key` does so we remove it.
                    model_key = ".".join(checkpoint_key.split(".")[1:])

                if (
                    model_key in model_state_dict
                    and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape
                ):
                    # drop the mismatched tensor so load() leaves the freshly-initialized one in place
                    mismatched_keys.append(
                        (
                            checkpoint_key,
                            state_dict[checkpoint_key].shape,
                            model_state_dict[model_key].shape,
                        )
                    )
                    del state_dict[checkpoint_key]

        # Some models may have keys that are not in the state by design, removing them before needlessly warning
        # the user.
        if cls._keys_to_ignore_on_load_missing is not None:
            for pat in cls._keys_to_ignore_on_load_missing:
                missing_keys = [k for k in missing_keys if re.search(pat, k) is None]

        if cls._keys_to_ignore_on_load_unexpected is not None:
            for pat in cls._keys_to_ignore_on_load_unexpected:
                unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]

        if _fast_init:
            # retrieve unintialized modules and initialize
            # (only these need init, since no_init_weights skipped the global pass)
            uninitialized_modules = model.retrieve_modules_from_names(
                missing_keys, add_prefix=add_prefix_to_model, remove_prefix=remove_prefix_from_model
            )
            for module in uninitialized_modules:
                model._init_weights(module)

        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, "_metadata", None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        error_msgs = []

        # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
        # so we need to apply the function recursively.
        def load(module, prefix=""):
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            args = (state_dict, prefix, local_metadata, True, [], [], error_msgs)
            if is_deepspeed_zero3_enabled():
                import deepspeed

                # because zero3 puts placeholders in model params, this context
                # manager gathers (unpartitions) the params of the current layer, then loads from
                # the state dict and then re-partitions them again
                with deepspeed.zero.GatheredParameters(
                    list(module.parameters(recurse=False)), modifier_rank=0
                ):
                    if torch.distributed.get_rank() == 0:
                        module._load_from_state_dict(*args)
            else:
                module._load_from_state_dict(*args)

            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + ".")

        # Make sure we are able to load base models as well as derived models (with heads)
        beg_prefix = ""
        model_to_load = model
        if not hasattr(model, cls.base_model_prefix) and has_prefix_module:
            # loading a bare base model from a with-head checkpoint: strip via prefix
            beg_prefix = cls.base_model_prefix + "."
        if hasattr(model, cls.base_model_prefix) and not has_prefix_module:
            # loading a with-head model from a bare base-model checkpoint: load into the submodule
            model_to_load = getattr(model, cls.base_model_prefix)
            # NOTE(review): "training to load" in the message below is an upstream typo for
            # "trying to load"; also `expected_keys_not_prefixed` is only bound when the
            # remove_prefix_from_model branch ran earlier — confirm this branch implies that.
            if any(key in expected_keys_not_prefixed for key in loaded_keys):
                raise ValueError(
                    "The state dictionary of the model you are training to load is corrupted. Are you sure it was "
                    "properly saved?"
                )

        load(model_to_load, prefix=beg_prefix)

        if len(error_msgs) > 0:
            error_msg = "\n\t".join(error_msgs)
            raise RuntimeError(
                f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}"
            )

        if len(unexpected_keys) > 0:
            logger.warning(
                f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when "
                f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
                f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
                f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
                f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
                f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
            )
        else:
            logger.info(
                f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n"
            )
        if len(missing_keys) > 0:
            logger.warning(
                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
                f"and are newly initialized: {missing_keys}\n"
                f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )
        elif len(mismatched_keys) == 0:
            logger.info(
                f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
                f"If your task is similar to the task the model of the checkpoint was trained on, "
                f"you can already use {model.__class__.__name__} for predictions without further training."
            )
        if len(mismatched_keys) > 0:
            mismatched_warning = "\n".join(
                [
                    f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
                    for key, shape1, shape2 in mismatched_keys
                ]
            )
            logger.warning(
                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
                f"and are newly initialized because the shapes did not match:\n{mismatched_warning}\n"
                f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )

        return model, missing_keys, unexpected_keys, mismatched_keys, error_msgs

    def retrieve_modules_from_names(self, names, add_prefix=False, remove_prefix=False):
        # Map weight names back to the modules that own them; used by the _fast_init
        # path to know which freshly-created modules still need initialization.
        module_keys = set([".".join(key.split(".")[:-1]) for key in names])

        # torch.nn.ParameterList is a special case where two parameter keywords
        # are appended to the module name, *e.g.* bert.special_embeddings.0
        module_keys = module_keys.union(
            set([".".join(key.split(".")[:-2]) for key in names if key[-1].isdigit()])
        )

        retrieved_modules = []
        # retrieve all modules that has at least one missing weight name
        for name, module in self.named_modules():
            if remove_prefix:
                name = (
                    ".".join(name.split(".")[1:])
                    if name.startswith(self.base_model_prefix)
                    else name
                )
            elif add_prefix:
                name = (
                    ".".join([self.base_model_prefix, name])
                    if len(name) > 0
                    else self.base_model_prefix
                )

            if name in module_keys:
                retrieved_modules.append(module)
        return retrieved_modules

    @classmethod
    def _load_state_dict_into_model_low_mem(
        cls, model, loaded_state_dict_keys, resolved_archive_file
    ):
        # Low-RAM loading: move target params to the meta device first, then
        # materialize them one by one on CPU from the checkpoint.
        require_version_core("torch>=1.9")
        if is_deepspeed_zero3_enabled():
raise ValueError("low_cpu_mem_usage arg cannot be used with DeepSpeed ZeRO-3") # a helper util to find the last sub-module and the param/buffer name def find_submodule_and_param_name(model, long_key): split_key = long_key.split(".") submodule = model while len(split_key) > 1: if hasattr(submodule, split_key[0]): submodule = getattr(submodule, split_key[0]) del split_key[0] else: submodule = None break return submodule, split_key[0] # dematerialize param storage for keys that are going to be replaced by state_dict, by # putting those on the meta device for k in loaded_state_dict_keys: submodule, param_name = find_submodule_and_param_name(model, k) if submodule is not None: # selectively switch to the meta device only those params/buffers that will # be next replaced from state_dict. This a complex way to do p.to_("meta") # since we have no in-place to_ for tensors. new_val = getattr(submodule, param_name) if isinstance(new_val, torch.nn.Parameter): # isinstance returns False for Params on meta device, so switch after the check new_val = torch.nn.Parameter(new_val.to("meta")) else: new_val = new_val.to("meta") setattr(submodule, param_name, new_val) # only now can load state_dict state_dict = torch.load(resolved_archive_file, map_location="cpu") # materialize state_dict entries one by one on CPU for k in loaded_state_dict_keys: submodule, param_name = find_submodule_and_param_name(model, k) if submodule is not None: new_val = state_dict[k] if isinstance(getattr(submodule, param_name), torch.nn.Parameter): new_val = torch.nn.Parameter(new_val) setattr(submodule, param_name, new_val) del state_dict @classmethod def register_for_auto_class(cls, auto_class="AutoModel"): """ Register this class with a given auto class. This should only be used for custom models as the ones in the library are already mapped with an auto class. <Tip warning={true}> This API is experimental and may have some slight breaking changes in the next releases. 
</Tip> Args: auto_class (`str` or `type`, *optional*, defaults to `"AutoModel"`): The auto class to register this new model with. """ if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f"{auto_class} is not a valid auto class.") cls._auto_class = auto_class PreTrainedModel.push_to_hub = copy_func(PreTrainedModel.push_to_hub) PreTrainedModel.push_to_hub.__doc__ = PreTrainedModel.push_to_hub.__doc__.format( object="model", object_class="AutoModel", object_files="model checkpoint" ) class Conv1D(nn.Module): def __init__(self, nf, nx): super().__init__() self.nf = nf w = torch.empty(nx, nf) nn.init.normal_(w, std=0.02) self.weight = nn.Parameter(w) self.bias = nn.Parameter(torch.zeros(nf)) def forward(self, x): size_out = x.size()[:-1] + (self.nf,) x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) x = x.view(size_out) return x @dataclass class SquadHeadOutput(ModelOutput): loss = None top_beg_log_probs = None top_beg_index = None top_end_log_probs = None top_end_index = None cls_logits = None class SQuADHead(nn.Module): def __init__(self, config): super().__init__() self.beg_n_top = config.beg_n_top self.end_n_top = config.end_n_top self.logits_beg = PoolerStartLogits(config) self.logits_end = PoolerEndLogits(config) self.answer_class = PoolerAnswerClass(config) def forward( self, hiddens, beg_positions=None, end_positions=None, cls_index=None, is_impossible=None, p_mask=None, return_dict=False, ): logits_beg = self.logits_beg(hiddens, p_mask=p_mask) if beg_positions is not None and end_positions is not None: # If we are on multi-GPU, let's remove the dimension added by batch splitting for x in (beg_positions, end_positions, cls_index, is_impossible): if x is not None and x.dim() > 1: x.squeeze_(-1) # during training, compute the end logits based on the ground truth of the start position logits_end = self.logits_end(hiddens, 
                                         beg_positions=beg_positions, p_mask=p_mask)

            loss_fct = CrossEntropyLoss()
            beg_loss = loss_fct(logits_beg, beg_positions)
            end_loss = loss_fct(logits_end, end_positions)
            total_loss = (beg_loss + end_loss) / 2

            if cls_index is not None and is_impossible is not None:
                # Predict answerability from the representation of CLS and START
                cls_logits = self.answer_class(
                    hiddens, beg_positions=beg_positions, cls_index=cls_index
                )
                loss_fct_cls = nn.BCEWithLogitsLoss()
                cls_loss = loss_fct_cls(cls_logits, is_impossible)

                # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to beg_loss and end_loss
                total_loss += cls_loss * 0.5

            return SquadHeadOutput(loss=total_loss) if return_dict else (total_loss,)

        else:
            # during inference, compute the end logits based on beam search
            bsz, slen, hsz = hiddens.size()
            beg_log_probs = nn.functional.softmax(logits_beg, dim=-1)  # shape (bsz, slen)

            top_beg_log_probs, top_beg_index = torch.topk(
                beg_log_probs, self.beg_n_top, dim=-1
            )  # shape (bsz, beg_n_top)
            top_beg_index_exp = top_beg_index.unsqueeze(-1).expand(
                -1, -1, hsz
            )  # shape (bsz, beg_n_top, hsz)
            x_beg = torch.gather(hiddens, -2, top_beg_index_exp)  # shape (bsz, beg_n_top, hsz)
            x_beg = x_beg.unsqueeze(1).expand(-1, slen, -1, -1)  # shape (bsz, slen, beg_n_top, hsz)
            hidden_states_expanded = hiddens.unsqueeze(2).expand_as(
                x_beg
            )  # shape (bsz, slen, beg_n_top, hsz)
            p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None

            # end logits conditioned on each of the top-k start candidates
            logits_end = self.logits_end(hidden_states_expanded, x_beg=x_beg, p_mask=p_mask)
            end_log_probs = nn.functional.softmax(logits_end, dim=1)  # shape (bsz, slen, beg_n_top)

            top_end_log_probs, top_end_index = torch.topk(
                end_log_probs, self.end_n_top, dim=1
            )  # shape (bsz, end_n_top, beg_n_top)
            top_end_log_probs = top_end_log_probs.view(-1, self.beg_n_top * self.end_n_top)
            top_end_index = top_end_index.view(-1, self.beg_n_top * self.end_n_top)

            # probability-weighted start representation feeds the answerability classifier
            x_beg = torch.einsum("blh,bl->bh", hiddens, beg_log_probs)
            cls_logits = self.answer_class(hiddens,
x_beg=x_beg, cls_index=cls_index) if not return_dict: return ( top_beg_log_probs, top_beg_index, top_end_log_probs, top_end_index, cls_logits, ) else: return SquadHeadOutput( top_beg_log_probs=top_beg_log_probs, top_beg_index=top_beg_index, top_end_log_probs=top_end_log_probs, top_end_index=top_end_index, cls_logits=cls_logits, ) class SequenceSummary(nn.Module): def __init__(self, config): super().__init__() self.summy_type = getattr(config, "summy_type", "last") if self.summy_type == "attn": raise NotImplementedError self.summary = Identity() if hasattr(config, "sum_use_proj") and config.sum_use_proj: if hasattr(config, "sum_proj") and config.sum_proj and config.num_labels > 0: num_classes = config.num_labels else: num_classes = config.d_model self.summary = nn.Linear(config.d_model, num_classes) activation_string = getattr(config, "sum_act", None) self.activation = get_activation(activation_string) if activation_string else Identity() self.first_dropout = Identity() if hasattr(config, "drop_sum_first") and config.drop_sum_first > 0: self.first_dropout = nn.Dropout(config.drop_sum_first) self.last_dropout = Identity() if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0: self.last_dropout = nn.Dropout(config.summary_last_dropout) def forward(self, hiddens, cls_index=None): if self.summy_type == "last": output = hiddens[:, -1] elif self.summy_type == "first": output = hiddens[:, 0] elif self.summy_type == "mean": output = hiddens.mean(dim=1) elif self.summy_type == "cls_index": if cls_index is None: cls_index = torch.full_like( hiddens[..., :1, :], hiddens.shape[-2] - 1, dtype=torch.long, ) else: cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hiddens.size(-1),)) # shape of cls_index: (bsz, XX, 1, d_model) where XX are optional leading dim of hiddens output = hiddens.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, d_model) elif self.summy_type == "attn": raise 
NotImplementedError output = self.first_dropout(output) output = self.summary(output) output = self.activation(output) output = self.last_dropout(output) return output def unwrap_model(model): if hasattr(model, "module"): return unwrap_model(model.module) else: return model
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,440
quantapix/qnarre
refs/heads/main
/tools/triton/python/test/unit/runtime/test_launch.py
import gc # import importlib # import os # import sys # import tempfile # import textwrap # import time import tracemalloc import torch import triton import triton.language as tl # from typing import Tuple def test_memory_leak() -> None: @triton.jit def kernel(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 10 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tl.store(out_ptr0 + (x0 + tl.zeros([XBLOCK], tl.int32)), tmp0, xmask) tracemalloc.start() try: inp = torch.randn(10, device='cuda') out = torch.randn(10, device='cuda') kernel[(10,)](inp, out, 10, XBLOCK=16) gc.collect() begin, _ = tracemalloc.get_traced_memory() for _ in range(100): kernel[(10,)](inp, out, 10, XBLOCK=16) gc.collect() end, _ = tracemalloc.get_traced_memory() assert end - begin < 1000 finally: tracemalloc.stop() # LATENCY_THRESHOLD_US = 46 # def test_kernel_launch_latency() -> None: # def define_kernel(kernel_name: str, num_tensor_args: int) -> str: # arg_str = ",".join([f"arg{i}: torch.Tensor" for i in range(num_tensor_args)]) # arg_str += ", n_elements: int, BLOCK_SIZE: tl.constexpr" # func_str = f""" # import torch # import triton # import triton.language as tl # @triton.jit # def {kernel_name}({arg_str}): # pass # """ # with tempfile.NamedTemporaryFile(mode="w+t", suffix=".py", delete=False) as temp_file: # temp_file.write(textwrap.dedent(func_str)) # temp_file_path = temp_file.name # return temp_file_path # def import_kernel(file_path, kernel_name): # directory, filename = os.path.split(file_path) # module_name, _ = os.path.splitext(filename) # sys.path.insert(0, directory) # module = importlib.import_module(module_name) # kernel = getattr(module, kernel_name) # return kernel # def empty(*kernel_args: Tuple[torch.Tensor]): # first_arg = kernel_args[0] # n_elements = first_arg.numel() # grid = (triton.cdiv(n_elements, 1024),) # device = torch.cuda.current_device() # # Warmup # 
empty_kernel[grid](*kernel_args, n_elements, BLOCK_SIZE=1024, device=device) # torch.cuda.synchronize() # # Measure launch overhead at steady state # num_runs = 1000 # start_time = time.time() # for i in range(num_runs): # empty_kernel[grid](*kernel_args, n_elements, BLOCK_SIZE=1024, device=device) # end_time = time.time() # latency_us = (end_time - start_time) / num_runs * 1e6 # assert latency_us < LATENCY_THRESHOLD_US, "Kernel launch time has increased!" # num_tensor_args = 40 # kernel_name = 'empty_kernel' # file_path = define_kernel(kernel_name, num_tensor_args) # empty_kernel = import_kernel(file_path, kernel_name) # # Initialize random tensors for the empty_kernel # torch.manual_seed(0) # size = 1024 # kernel_args = (torch.rand(size, device='cuda') for i in range(num_tensor_args)) # # Run empty, which would run empty_kernel internally # empty(*kernel_args)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,441
quantapix/qnarre
refs/heads/main
/qnarre/tokens/utils.py
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

import bisect
import itertools
import re
import unicodedata

from collections import OrderedDict

from .file_utils import PaddingStrategy
from .base import (
    AddedToken,
    BatchEncoding,
    PreTrainedTokenizerBase,
    TextInput,
    TruncationStrategy,
)

from transformers.utils import logging

log = logging.get_logger(__name__)

# Slow tokenizers are saved in a vocabulary plus three separated files
SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
ADDED_TOKENS_FILE = "added_tokens.json"
TOKENIZER_CONFIG_FILE = "tokenizer_config.json"


class Trie:
    """Character trie used to split raw text on never-split (added/special) tokens.

    Each node is a plain dict mapping a character to the child node; the empty
    string key "" marks the end of a stored word.
    """

    def __init__(self):
        # Root node of the trie; nested dicts keyed by single characters.
        self.data = {}

    def add(self, word):
        """Insert `word` into the trie; empty strings are ignored."""
        if not word:
            # Prevent empty string
            return
        ref = self.data
        for char in word:
            # `and/or` idiom: reuse the existing child node, else create one.
            ref[char] = char in ref and ref[char] or {}
            ref = ref[char]
        # Terminal marker: this path spells a complete stored word.
        ref[""] = 1

    def split(self, text):
        """Split `text` on every stored word, keeping the matched words as
        separate pieces.

        Scans `text` once, tracking all partial matches in `states`
        (start offset -> current trie node). When a complete match is found,
        a lookahead resolves overlaps in favor of the longest match starting
        earliest; the resulting cut points accumulate in `offsets`.
        """
        states = OrderedDict()
        offsets = [0]
        # Characters before `skip` were consumed by an already-committed match.
        skip = 0
        for current, current_char in enumerate(text):
            if skip and current < skip:
                continue
            to_remove = set()
            reset = False
            for start, trie_pointer in states.items():
                if "" in trie_pointer:
                    # A stored word ends here; look ahead to prefer the
                    # longest match among all still-open partial matches.
                    for lookstart, looktrie_pointer in states.items():
                        if lookstart > start:
                            # States are ordered by start; later starts cannot win.
                            break
                        elif lookstart < start:
                            lookahead_index = current + 1
                            end = current + 1
                        else:
                            lookahead_index = current
                            end = current
                        next_char = text[lookahead_index] if lookahead_index < len(text) else None
                        if "" in looktrie_pointer:
                            start = lookstart
                            end = lookahead_index
                            skip = lookahead_index
                        while next_char in looktrie_pointer:
                            looktrie_pointer = looktrie_pointer[next_char]
                            lookahead_index += 1
                            if "" in looktrie_pointer:
                                # Longer match found; extend the committed cut.
                                start = lookstart
                                end = lookahead_index
                                skip = lookahead_index
                            if lookahead_index == len(text):
                                # End of string
                                break
                            next_char = text[lookahead_index]
                    # End lookahead
                    # Storing and resetting
                    offsets.append(start)
                    offsets.append(end)
                    reset = True
                    break
                elif current_char in trie_pointer:
                    # The current character being looked at has a match within the trie
                    # update the pointer (it will be stored back into states later).
                    trie_pointer = trie_pointer[current_char]
                    # Storing back the new pointer into the states.
                    # Partial matches got longer by one.
                    states[start] = trie_pointer
                else:
                    # The new character has not match in the trie, we need
                    # to stop keeping track of this partial match.
                    # We can't do it directly within the loop because of how
                    # python iteration works
                    to_remove.add(start)
            # Either clearing the full start (we found a real match)
            # Or clearing only the partial matches that didn't work.
            if reset:
                states = {}
            else:
                for start in to_remove:
                    del states[start]
            # If this character is a starting character within the trie
            # start keeping track of this partial match.
            if current >= skip and current_char in self.data:
                states[current] = self.data[current_char]
        # We have a cut at the end with states.
        for start, trie_pointer in states.items():
            if "" in trie_pointer:
                # This is a final match, we need to reset and
                # store the results in `offsets`.
                end = len(text)
                offsets.append(start)
                offsets.append(end)
                # Longest cut is always the one with lower start so the first
                # item so we need to break.
                break
        return self.cut_text(text, offsets)

    def cut_text(self, text, offsets):
        """Slice `text` at the collected `offsets`, dropping empty pieces."""
        # We have all the offsets now, we just need to do the actual splitting.
        # We need to eventually add the first part of the string and the eventual
        # last part.
        offsets.append(len(text))
        tokens = []
        start = 0
        for end in offsets:
            if start > end:
                log.error(
                    "There was a bug in Trie algorithm in tokenization. Attempting to recover. Please report it anyway."
                )
                continue
            elif start == end:
                # This might happen if there's a match at index 0
                # we're also preventing zero-width cuts in case of two
                # consecutive matches
                continue
            tokens.append(text[start:end])
            start = end
        return tokens


def _is_whitespace(char):
    """Return True if `char` counts as whitespace (ASCII ws or Unicode Zs)."""
    if char == " " or char == "\t" or char == "\n" or char == "\r":
        return True
    cat = unicodedata.category(char)
    if cat == "Zs":
        return True
    return False


def _is_control(char):
    """Return True for Unicode control characters; tab/newline/CR count as whitespace instead."""
    if char == "\t" or char == "\n" or char == "\r":
        return False
    cat = unicodedata.category(char)
    if cat.startswith("C"):
        return True
    return False


def _is_punctuation(char):
    """Return True if `char` is punctuation (Unicode P* or non-alphanumeric ASCII)."""
    cp = ord(char)
    # We treat all non-letter/number ASCII as punctuation.
    # Characters such as "^", "$", and "`" are not in the Unicode
    # Punctuation class but we treat them as punctuation anyways, for
    # consistency.
if ( (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126) ): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False def _is_end_of_word(text): last_char = text[-1] return bool(_is_control(last_char) | _is_punctuation(last_char) | _is_whitespace(last_char)) def _is_start_of_word(text): first_char = text[0] return bool(_is_control(first_char) | _is_punctuation(first_char) | _is_whitespace(first_char)) def _insert_one_token_to_ordered_list(token_list, new_token): insertion_idx = bisect.bisect_left(token_list, new_token) # Checks if new_token is already in the ordered token_list if insertion_idx < len(token_list) and token_list[insertion_idx] == new_token: # new_token is in token_list, don't add return else: token_list.insert(insertion_idx, new_token) class PreTrainedTokenizer(PreTrainedTokenizerBase): def __init__(self, **kw): super().__init__(**kw) self.added_tokens_encoder = {} self.added_tokens_decoder = {} self.unique_no_split_tokens = [] self.tokens_trie = Trie() self._decode_use_source_tokenizer = False @property def is_fast(self): return False @property def s_vocab(self): raise NotImplementedError def get_added_vocab(self): return self.added_tokens_encoder def __len__(self): return self.s_vocab + len(self.added_tokens_encoder) def _add_tokens(self, new_tokens, special_tokens=False): new_tokens = [str(tok) for tok in new_tokens] tokens_to_add = [] for token in new_tokens: if not isinstance(token, str): raise TypeError(f"Token {token} is not a string but a {type(token)}.") if not special_tokens and hasattr(self, "do_lower_case") and self.do_lower_case: token = token.lower() if ( token != self.unk and self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk) and token not in tokens_to_add ): tokens_to_add.append(token) if self.verbose: log.info(f"Adding {token} to the vocabulary") added_tok_encoder = dict((tok, len(self) + i) for i, tok in 
enumerate(tokens_to_add)) added_tok_decoder = {v: k for k, v in added_tok_encoder.items()} self.added_tokens_encoder.update(added_tok_encoder) self.added_tokens_decoder.update(added_tok_decoder) if special_tokens: if len(new_tokens) == 1: _insert_one_token_to_ordered_list(self.unique_no_split_tokens, new_tokens[0]) else: self.unique_no_split_tokens = sorted( set(self.unique_no_split_tokens).union(set(new_tokens)) ) else: # Or on the newly added tokens if len(tokens_to_add) == 1: _insert_one_token_to_ordered_list(self.unique_no_split_tokens, tokens_to_add[0]) else: self.unique_no_split_tokens = sorted( set(self.unique_no_split_tokens).union(set(tokens_to_add)) ) self._create_trie(self.unique_no_split_tokens) return len(tokens_to_add) def _create_trie(self, unique_no_split_tokens): trie = Trie() for token in unique_no_split_tokens: if ( hasattr(self, "do_lower_case") and self.do_lower_case and token not in self.all_special_tokens ): trie.add(token.lower()) else: trie.add(token) self.tokens_trie = trie def num_special_tokens_to_add(self, pair=False): toks_0 = [] toks_1 = [] return len(self.build_inputs_with_special_tokens(toks_0, toks_1 if pair else None)) def tokenize(self, text: TextInput, **kw): all_special_tokens_extended = dict( (str(t), t) for t in self.all_special_tokens_extended if isinstance(t, AddedToken) ) text, kw = self.prepare_for_tokenization(text, **kw) if kw: log.warning(f"Keyword arguments {kw} not recognized.") # TODO: should this be in the base class? 
if hasattr(self, "do_lower_case") and self.do_lower_case: # convert non-special tokens to lowercase escaped_special_toks = [ re.escape(s_tok) for s_tok in (self.unique_no_split_tokens + self.all_special_tokens) ] pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)" text = re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), text) no_split_token = set(self.unique_no_split_tokens) tokens = self.tokens_trie.split(text) # ["This is something", "<special_token_1>", " else"] for i, token in enumerate(tokens): if token in no_split_token: tok_extended = all_special_tokens_extended.get(token, None) left = tokens[i - 1] if i > 0 else None right = tokens[i + 1] if i < len(tokens) - 1 else None if isinstance(tok_extended, AddedToken): if tok_extended.rstrip and right: # A bit counter-intuitive but we strip the left of the string # since tok_extended.rstrip means the special token is eating all white spaces on its right tokens[i + 1] = right.lstrip() # Strip white spaces on the left if tok_extended.lstrip and left: tokens[i - 1] = left.rstrip() # Opposite here else: # We strip left and right by default if right: tokens[i + 1] = right.lstrip() if left: tokens[i - 1] = left.rstrip() # ["This is something", "<special_token_1>", "else"] tokenized_text = [] for token in tokens: # Need to skip eventual empty (fully stripped) tokens if not token: continue if token in no_split_token: tokenized_text.append(token) else: tokenized_text.extend(self._tokenize(token)) # ["This", " is", " something", "<special_token_1>", "else"] return tokenized_text def _tokenize(self, text, **kw): raise NotImplementedError def convert_tokens_to_ids(self, tokens): if tokens is None: return None if isinstance(tokens, str): return self._convert_token_to_id_with_added_voc(tokens) ids = [] for token in tokens: ids.append(self._convert_token_to_id_with_added_voc(token)) return ids def _convert_token_to_id_with_added_voc(self, token): if token is None: return None if token in 
self.added_tokens_encoder: return self.added_tokens_encoder[token] return self._convert_token_to_id(token) def _convert_token_to_id(self, token): raise NotImplementedError def _encode_plus( self, text, text_pair=None, add_special_tokens=True, padding_strategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy=TruncationStrategy.DO_NOT_TRUNCATE, max_len=None, stride=0, is_split_into_words=False, pad_to_multiple_of=None, return_tensors=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, **kw, ): def get_input_ids(text): if isinstance(text, str): tokens = self.tokenize(text, **kw) return self.convert_tokens_to_ids(tokens) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str): if is_split_into_words: tokens = list( itertools.chain( *(self.tokenize(t, is_split_into_words=True, **kw) for t in text) ) ) return self.convert_tokens_to_ids(tokens) else: return self.convert_tokens_to_ids(text) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int): return text else: if is_split_into_words: raise ValueError( f"Input {text} is not valid. Should be a string or a list/tuple of strings when `is_split_into_words=True`." ) else: raise ValueError( f"Input {text} is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers." ) if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast. 
" "More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) first_ids = get_input_ids(text) second_ids = get_input_ids(text_pair) if text_pair is not None else None return self.prepare_for_model( first_ids, pair_ids=second_ids, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_len=max_len, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) def _batch_encode_plus( self, batch_text_or_text_pairs, add_special_tokens=True, padding_strategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy=TruncationStrategy.DO_NOT_TRUNCATE, max_len=None, stride=0, is_split_into_words=False, pad_to_multiple_of=None, return_tensors=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, **kw, ): def get_input_ids(text): if isinstance(text, str): tokens = self.tokenize(text, **kw) return self.convert_tokens_to_ids(tokens) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str): if is_split_into_words: tokens = list( itertools.chain( *(self.tokenize(t, is_split_into_words=True, **kw) for t in text) ) ) return self.convert_tokens_to_ids(tokens) else: return self.convert_tokens_to_ids(text) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int): return text else: raise ValueError( "Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers." 
) if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." ) input_ids = [] for ids_or_pair_ids in batch_text_or_text_pairs: if not isinstance(ids_or_pair_ids, (list, tuple)): ids, pair_ids = ids_or_pair_ids, None elif is_split_into_words and not isinstance(ids_or_pair_ids[0], (list, tuple)): ids, pair_ids = ids_or_pair_ids, None else: ids, pair_ids = ids_or_pair_ids first_ids = get_input_ids(ids) second_ids = get_input_ids(pair_ids) if pair_ids is not None else None input_ids.append((first_ids, second_ids)) batch_outputs = self._batch_prepare_for_model( input_ids, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_len=max_len, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose, ) return BatchEncoding(batch_outputs) def _batch_prepare_for_model( self, batch_ids_pairs, add_special_tokens=True, padding_strategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy=TruncationStrategy.DO_NOT_TRUNCATE, max_len=None, stride=0, pad_to_multiple_of=None, return_tensors=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_length=False, verbose=True, ): batch_outputs = {} for first_ids, second_ids in batch_ids_pairs: outputs = self.prepare_for_model( first_ids, second_ids, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward truncation=truncation_strategy.value, max_len=max_len, stride=stride, pad_to_multiple_of=None, # we pad in 
batch afterward return_attention_mask=False, # we pad in batch afterward return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, # We convert the whole batch to tensors at the end prepend_batch_axis=False, verbose=verbose, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad( batch_outputs, padding=padding_strategy.value, max_len=max_len, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs def prepare_for_tokenization(self, text, is_split_into_words=False, **kw): return (text, kw) def get_special_tokens_mask( self, toks_0, toks_1=None, has_specials=False, ): if has_specials: if toks_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return super().get_special_tokens_mask(toks_0=toks_0, toks_1=toks_1, has_specials=True) return [0] * ((len(toks_1) if toks_1 else 0) + len(toks_0)) def convert_ids_to_tokens(self, ids, skip_special_tokens=False): ... def convert_ids_to_tokens(self, ids, skip_special_tokens=False): ... 
def convert_ids_to_tokens(self, ids, skip_special_tokens=False): if isinstance(ids, int): if ids in self.added_tokens_decoder: return self.added_tokens_decoder[ids] else: return self._convert_id_to_token(ids) tokens = [] for index in ids: index = int(index) if skip_special_tokens and index in self.all_special_ids: continue if index in self.added_tokens_decoder: tokens.append(self.added_tokens_decoder[index]) else: tokens.append(self._convert_id_to_token(index)) return tokens def _convert_id_to_token(self, index): raise NotImplementedError def convert_tokens_to_string(self, tokens): return " ".join(tokens) def _decode( self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True, spaces_between_special_tokens=True, **kw, ): self._decode_use_source_tokenizer = kw.pop("use_source_tokenizer", False) filtered_tokens = self.convert_ids_to_tokens( token_ids, skip_special_tokens=skip_special_tokens ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 sub_texts = [] current_sub_text = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) current_sub_text = [] sub_texts.append(token) else: current_sub_text.append(token) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) if spaces_between_special_tokens: text = " ".join(sub_texts) else: text = "".join(sub_texts) if clean_up_tokenization_spaces: clean_text = self.clean_up_tokenization(text) return clean_text else: return text
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,442
quantapix/qnarre
refs/heads/main
/tools/triton/python/test/unit/debugger/test_debugger.py
import random
import torch
import triton
import triton.language as tl
from triton.debugger.debugger import program_ids_from_grid


def test_addition():
    """Elementwise-add kernel run through the debugger (interpret=True) path;
    the result must match eager torch addition."""

    @triton.jit(interpret=True)
    def add_kernel(
        x_ptr,
        y_ptr,
        output_ptr,
        n_elements,
        BLOCK_SIZE: tl.constexpr,
    ):
        # Each program handles one contiguous BLOCK_SIZE slice of the input.
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements  # guard the tail when n_elements % BLOCK_SIZE != 0
        x = tl.load(x_ptr + offsets, mask=mask)
        y = tl.load(y_ptr + offsets, mask=mask)
        output = x + y
        tl.store(output_ptr + offsets, output, mask=mask)

    a = torch.rand((128,), device="cuda")
    b = torch.rand((128,), device="cuda")
    expected = a + b
    output = torch.empty((128,), device="cuda")

    def grid(meta):
        # 1-D launch grid: enough programs to cover all 128 elements.
        return (triton.cdiv(128, meta["BLOCK_SIZE"]),)

    add_kernel[grid](a, b, output, 128, BLOCK_SIZE=32)
    assert torch.allclose(expected, output, atol=1e-2, rtol=0)


def test_program_ids_from_grid():
    """program_ids_from_grid must yield every (pid0, pid1) combination exactly
    once, in a randomized order."""
    random.seed(123)
    grid = (3, 4)
    expected_combinations = 3 * 4
    unique_combinations = set(program_ids_from_grid(grid))
    # Coverage: all 12 distinct program ids appear.
    assert len(unique_combinations) == expected_combinations
    first_run = list(program_ids_from_grid(grid))
    second_run = list(program_ids_from_grid(grid))
    # Randomized order: two runs should differ. NOTE(review): this is
    # probabilistic — identical shuffles would make it flaky in principle.
    assert first_run != second_run


def test_atomic():
    """Exercise atomic_add / atomic_xchg / atomic_cas in interpret mode.

    Per-slot trace: add -> 1; xchg stores 3 and returns old value 1;
    t becomes 2; cas(3, 2) matches and stores 2; cas(40, 9) does not match.
    Final value in every slot: 2.
    """

    @triton.jit(interpret=True)
    def atomic(
        x_ptr,
    ):
        pid = tl.program_id(axis=0)
        tl.atomic_add(x_ptr + pid, 1)
        t = tl.atomic_xchg(x_ptr + pid, 3)
        t += 1  # 2
        tl.atomic_cas(x_ptr + pid, 3, t)  # match
        tl.atomic_cas(x_ptr + pid, 40, 9)  # no match

    nb_dim = 16
    a = torch.zeros((nb_dim, ), dtype=torch.int32, device="cuda")
    atomic[(nb_dim, )](a)
    assert torch.allclose(a, torch.full_like(a, 2))
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,443
quantapix/qnarre
refs/heads/main
/qnarre/prep/convert/xlm.py
# Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import json import numpy import torch from argparse import ArgumentParser from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.models.xlm.tokenization_xlm import VOCAB_FS from transformers.utils import logging logging.set_verbosity_info() def to_pytorch(src_path, save_path): chkpt = torch.load(src_path, map_location="cpu") state_dict = chkpt["model"] two_levels_state_dict = {} for k, v in state_dict.items(): if "pred_layer" in k: two_levels_state_dict[k] = v else: two_levels_state_dict["transformer." 
+ k] = v cfg = chkpt["params"] cfg = dict( (n, v) for n, v in cfg.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray)) ) vocab = chkpt["dico_word2id"] vocab = dict( (s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""), i) for s, i in vocab.items() ) w = save_path + "/" + WEIGHTS_NAME c = save_path + "/" + CONFIG_NAME v = save_path + "/" + VOCAB_FS["vocab_file"] print(f"Saving to: {w}") torch.save(two_levels_state_dict, w) print(f"Saving config to: {c}") with open(c, "w", encoding="utf-8") as f: f.write(json.dumps(cfg, indent=2) + "\n") print(f"Saving vocab to: {v}") with open(v, "w", encoding="utf-8") as f: f.write(json.dumps(vocab, indent=2) + "\n") if __name__ == "__main__": x = ArgumentParser() x.add_argument("--src_path", default=None, type=str, required=True) x.add_argument("--save_path", default=None, type=str, required=True) y = x.parse_args() to_pytorch(y.src_path, y.save_path)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,444
quantapix/qnarre
refs/heads/main
/tools/triton/python/triton/language/__init__.py
"""isort:skip_file""" # Import order is significant here. from . import math from . import extra from .standard import ( cdiv, sigmoid, softmax, ravel, swizzle2d, zeros, zeros_like, ) from .core import ( abs, advance, arange, argmin, argmax, atomic_add, atomic_and, atomic_cas, atomic_max, atomic_min, atomic_or, atomic_xchg, atomic_xor, bfloat16, block_type, broadcast, broadcast_to, cat, constexpr, cos, debug_barrier, device_assert, device_print, dot, dtype, exp, expand_dims, full, fdiv, float16, float32, float64, float8e4, float8e5, function_type, int1, int16, int32, int64, int8, load, log, make_block_ptr, max, max_contiguous, maximum, min, minimum, multiple_of, num_programs, pi32_t, pointer_type, program_id, reduce, reshape, sin, sqrt, static_assert, static_print, store, sum, static_range, tensor, trans, triton, uint16, uint32, uint64, uint8, umulhi, view, void, where, xor_sum, ) from .random import ( pair_uniform_to_normal, philox, philox_impl, rand, rand4x, randint, randint4x, randn, randn4x, uint32_to_uniform_float, ) __all__ = [ "abs", "advance", "arange", "argmin", "argmax", "atomic_add", "atomic_and", "atomic_cas", "atomic_max", "atomic_min", "atomic_or", "atomic_xchg", "atomic_xor", "bfloat16", "block_type", "broadcast", "broadcast_to", "builtin", "cat", "cdiv", "constexpr", "cos", "debug_barrier", "device_assert", "device_print", "dot", "dtype", "exp", "expand_dims", "extra", "fdiv", "float16", "float32", "float64", "float8e4", "float8e5", "full", "function_type", "int1", "int16", "int32", "int64", "int8", "ir", "math", "load", "log", "make_block_ptr", "max", "max_contiguous", "maximum", "min", "minimum", "multiple_of", "num_programs", "pair_uniform_to_normal", "philox", "philox_impl", "pi32_t", "pointer_type", "program_id", "rand", "rand4x", "randint", "randint4x", "randn", "randn4x", "ravel", "reduce", "reshape", "sigmoid", "sin", "softmax", "sqrt", "static_range", "static_assert", "static_print", "store", "sum", "swizzle2d", "tensor", "trans", "triton", 
"uint16", "uint32", "uint32_to_uniform_float", "uint64", "uint8", "umulhi", "view", "void", "where", "xor_sum", "zeros", "zeros_like", ]
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,445
quantapix/qnarre
refs/heads/main
/qnarre/base/doc/contain.py
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

import networkx as nx

from .graph import Graphs
from .nominals import nominal
from .base import Record, LnkFull, LnkPartial


def is_partial(src, dst, sample=20, chunk=120):
    """Heuristically decide whether ``src`` is partially contained in ``dst``.

    Both arguments must be longer than ``chunk`` for the test to apply at
    all; shorter texts always yield False. The probe order is:

    1. the first ``sample`` characters of ``src``,
    2. a ``sample``-sized slice just before the end of ``src``
       (note: the final character itself is deliberately excluded),
    3. a ``sample``-wide window at the middle of each full ``chunk``-sized
       slice of ``src``.

    Returns True on the first probe found inside ``dst``; otherwise False.
    (Fix: previously the fall-through returned an implicit None; callers
    only used it in boolean context, so returning an explicit False is
    backward-compatible and clearer.)
    """
    n = len(src)
    if n > chunk and len(dst) > chunk:
        if src[:sample] in dst or src[-(sample + 1):-1] in dst:
            return True
        ms, mc = sample // 2, chunk // 2
        for i in range(n // chunk):
            i *= chunk
            s = src[i:i + chunk]
            # Only full chunks are probed; a short tail slice is skipped.
            if len(s) == chunk:
                if s[mc - ms:mc + ms] in dst:
                    return True
    return False


class Contains(Graphs):
    """Containment graphs over message records.

    Maintains three graphs keyed by the labels of Record, LnkFull and
    LnkPartial: the record graph plus edge sets for full and partial
    textual containment between the records' "nominal" texts.

    NOTE(review): ``mg.node[...]`` is the networkx 1.x/2.x node-attribute
    API, removed in networkx >= 2.4 (now ``mg.nodes[...]``). Left unchanged
    here since the project appears to target the older API — confirm the
    pinned networkx version before modernizing.
    """

    _graphs = tuple(a.label for a in (Record, LnkFull, LnkPartial))

    def msg_attrs(self, txt, kind, **kw):
        """Augment ``kw`` with the nominal form of ``txt`` and its kind.

        A nominal shorter than 5 characters is flagged as ``empty``.
        Returns the updated ``kw`` dict (mutated in place).
        """
        n = nominal(txt)
        kw.update(empty=len(n) < 5, nominal=n, kind=kind)
        return kw

    def grow_full(self, cntr, **_):
        """Add full-containment edges between records.

        Records are sorted by nominal length so each text is only compared
        against candidates at least as long as itself. When a nominal is a
        substring of another, an edge is added (both directions for equal
        lengths, counted '='; one direction otherwise, counted '<') and all
        transitive full-containment successors of the match are pruned from
        the remaining candidates — they are implied.

        NOTE(review): indentation reconstructed from a flattened source;
        the pruning step is placed inside the match branch, mirroring
        grow_partial — confirm against the original file.
        """
        mg, fg = self.record, self.full
        ns = ((m, mg.node[m]['nominal']) for m in mg.nodes())
        ns = sorted(ns, key=lambda t: len(t[1]))
        for i, (m, n) in enumerate(ns):
            ns2 = ns[i + 1:]
            while ns2:
                m2, n2 = ns2.pop(0)
                if n in n2:
                    fg.add_edge(m, m2)
                    if len(n) == len(n2):
                        fg.add_edge(m2, m)
                        cntr.incr('=')
                    else:
                        cntr.incr('<')
                    ss = nx.dfs_successors(fg, m2).values()
                    ss = {s for sl in ss for s in sl}
                    # Renamed comprehension variables (were m, n) to avoid
                    # shadowing the outer loop targets; behavior unchanged.
                    ns2 = [(m3, n3) for m3, n3 in ns2 if m3 not in ss]

    def grow_partial(self, cntr, **_):
        """Add partial-containment edges between records.

        Every record's nominal is tested against every other's with
        is_partial(); a hit adds an edge (counted '~') and prunes the full
        graph's transitive successors of the match from the remaining
        candidates.

        NOTE(review): indentation reconstructed from a flattened source;
        pruning placed inside the match branch — confirm against the
        original file.
        """
        mg, fg, pg = self.record, self.full, self.partial
        ns = [(m, mg.node[m]['nominal']) for m in mg.nodes()]
        for i, (m, n) in enumerate(ns):
            ns2 = ns[:]
            del ns2[i]
            while ns2:
                m2, n2 = ns2.pop(0)
                if is_partial(n, n2):
                    pg.add_edge(m, m2)
                    cntr.incr('~')
                    ss = nx.dfs_successors(fg, m2).values()
                    ss = {s for sl in ss for s in sl}
                    # Renamed to avoid shadowing the outer loop targets.
                    ns2 = [(m3, n3) for m3, n3 in ns2 if m3 not in ss]

    def grow_from(self, src, **kw):
        """Populate from ``src`` via the base class, then derive edges.

        Empty records are purged first; partial-containment growth is
        deliberately disabled (kept commented out, as in the original).
        """
        super().grow_from(src, **kw)
        self.purge_empty(**kw)
        self.grow_full(**kw)
        # self.grow_partial(**kw)


Contains.init_class()
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,446
quantapix/qnarre
refs/heads/main
/qnarre/prep/tokens/fast/t5.py
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Fast T5 tokenizer.

Wraps a HuggingFace ``tokenizers``-backed T5 tokenizer, including the
``<extra_id_*>`` sentinel tokens used by T5's span-corruption objective.
"""

import os

from shutil import copyfile

from ....tokens.fast import PreTrainedTokenizerFast
from ..t5 import Tokenizer as T5

VOCAB_FS = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

VOCAB_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}

INPUT_CAPS = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}


class Tokenizer(PreTrainedTokenizerFast):
    """Fast T5 tokenizer with ``extra_ids`` sentinel-token support."""

    vocab_fs = VOCAB_FS
    vocab_map = VOCAB_MAP
    input_caps = INPUT_CAPS
    model_input_names = ["input_ids", "mask"]
    slow_tokenizer_class = T5
    # Tokens prepended to every sequence; empty for T5 (no BOS/CLS).
    prefix_tokens = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos="</s>",
        unk="<unk>",
        pad="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kw,
    ):
        if extra_ids > 0 and additional_special_tokens is None:
            # Synthesize the sentinel tokens <extra_id_0> .. <extra_id_{n-1}>.
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            extra_tokens = len(
                set(filter(lambda x: ("extra_id_" in str(x)), additional_special_tokens))
            )
            # Was a bare `assert`, which `python -O` strips; raise explicitly
            # so the consistency check between the two arguments always runs.
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"extra_ids ({extra_ids}) does not match the number of "
                    f"distinct <extra_id_*> tokens ({extra_tokens}) found in "
                    f"additional_special_tokens"
                )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos=eos,
            unk=unk,
            pad=pad,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kw,
        )
        self.vocab_file = vocab_file
        # The slow tokenizer can only be re-saved when the original
        # sentencepiece model file is available.
        self.can_save_slow_tokenizer = bool(self.vocab_file)
        self._extra_ids = extra_ids

    def save_vocabulary(self, dir, pre=None):
        """Copy the sentencepiece model into `dir` (optionally prefixed with
        `pre`) and return a 1-tuple holding the written path.

        Raises ValueError when the original vocab file is unavailable.
        """
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        path = os.path.join(dir, (pre + "-" if pre else "") + VOCAB_FS["vocab_file"])
        # Avoid copying a file onto itself.
        if os.path.abspath(self.vocab_file) != os.path.abspath(path):
            copyfile(self.vocab_file, path)
        return (path,)

    def build_inputs_with_special_tokens(self, toks_0, toks_1=None):
        """Return `prefix + toks_0 + EOS` or, for a pair,
        `prefix + toks_0 + EOS + toks_1 + EOS`."""
        toks_0 = toks_0 + [self.EOS]
        if toks_1 is None:
            return self.prefix_tokens + toks_0
        toks_1 = toks_1 + [self.EOS]
        return self.prefix_tokens + toks_0 + toks_1

    def create_token_type_ids_from_sequences(self, toks_0, toks_1=None):
        """T5 does not use token-type ids: return a zero list covering the
        joined sequence(s) including the appended EOS token(s)."""
        eos = [self.EOS]
        if toks_1 is None:
            return len(toks_0 + eos) * [0]
        return len(toks_0 + eos + toks_1 + eos) * [0]
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,447
quantapix/qnarre
refs/heads/main
/tools/triton/python/triton/language/core.py
from __future__ import annotations from contextlib import contextmanager from enum import Enum from functools import wraps from typing import Callable, List, Sequence, TypeVar import triton from . import semantic from triton._C.libtriton.triton import ir T = TypeVar('T') TRITON_MAX_TENSOR_NUMEL = 131072 TRITON_BUILTIN = "__triton_builtin__" def builtin(fn: T) -> T: """Mark a function as a builtin.""" assert callable(fn) @wraps(fn) def wrapper(*args, **kwargs): if "_builder" not in kwargs or kwargs["_builder"] is None: raise ValueError( "Did you forget to add @triton.jit ? " "(`_builder` argument must be provided outside of JIT functions.)" ) return fn(*args, **kwargs) setattr(wrapper, TRITON_BUILTIN, True) return wrapper def is_builtin(fn) -> bool: """Is this a registered triton builtin function?""" return getattr(fn, TRITON_BUILTIN, False) def _to_tensor(x, builder): if isinstance(x, bool): return tensor(builder.get_int1(x), int1) # Note: compile-time const integers are represented by unsigned values elif isinstance(x, int): if -2**31 <= x < 2**31: return tensor(builder.get_int32(x), int32) elif 2**31 <= x < 2**32: return tensor(builder.get_int32(x), uint32) elif -2**63 <= x < 2**63: return tensor(builder.get_int64(x), int64) elif 2**63 <= x < 2**64: return tensor(builder.get_int64(x), uint64) else: raise RuntimeError(f'Nonrepresentable integer {x}.') elif isinstance(x, float): min_float32 = 2 ** -126 max_float32 = (2 - 2**-23) * 2**127 abs_x = __builtins__['abs'](x) if abs_x == float("inf") or\ abs_x == 0.0 or \ x != x or \ min_float32 <= abs_x <= max_float32: return tensor(builder.get_fp32(x), float32) else: return tensor(builder.get_fp64(x), float64) elif isinstance(x, constexpr): return _to_tensor(x.value, builder) elif isinstance(x, tensor): return x assert False, f"cannot convert {x} of type {type(x)} to tensor" class dtype: SINT_TYPES = ['int8', 'int16', 'int32', 'int64'] UINT_TYPES = ['int1', 'uint8', 'uint16', 'uint32', 'uint64'] FP_TYPES = ['fp8e4', 
'fp8e5', 'fp16', 'bf16', 'fp32', 'fp64'] STANDARD_FP_TYPES = ['fp16', 'bf16', 'fp32', 'fp64'] OTHER_TYPES = ['void'] class SIGNEDNESS(Enum): SIGNED = 0 UNSIGNED = 1 def __init__(self, name): self.name = name assert name in dtype.SINT_TYPES + dtype.UINT_TYPES + dtype.FP_TYPES + dtype.OTHER_TYPES, name if name in dtype.SINT_TYPES: self.int_signedness = dtype.SIGNEDNESS.SIGNED self.int_bitwidth = int(name.split('int')[-1]) self.primitive_bitwidth = self.int_bitwidth elif name in dtype.UINT_TYPES: self.int_signedness = dtype.SIGNEDNESS.UNSIGNED self.int_bitwidth = int(name.split('int')[-1]) self.primitive_bitwidth = self.int_bitwidth elif name in dtype.FP_TYPES: if name == 'fp8e4': self.fp_mantissa_width = 3 self.primitive_bitwidth = 8 elif name == 'fp8e5': self.fp_mantissa_width = 2 self.primitive_bitwidth = 8 elif name == 'fp16': self.fp_mantissa_width = 10 self.primitive_bitwidth = 16 elif name == 'bf16': self.fp_mantissa_width = 7 self.primitive_bitwidth = 16 elif name == 'fp32': self.fp_mantissa_width = 23 self.primitive_bitwidth = 32 elif name == 'fp64': self.fp_mantissa_width = 53 self.primitive_bitwidth = 64 else: raise RuntimeError(f'Unsupported floating-point type {name}') elif name == 'void': self.primitive_bitwidth = 0 def is_fp8(self): return 'fp8' in self.name def is_fp16(self): return self.name == 'fp16' def is_bf16(self): return self.name == 'bf16' def is_fp32(self): return self.name == 'fp32' def is_fp64(self): return self.name == 'fp64' def is_int1(self): return self.name == 'int1' def is_int8(self): return self.name == 'int8' def is_int16(self): return self.name == 'int16' def is_int32(self): return self.name == 'int32' def is_int64(self): return self.name == 'int64' def is_uint8(self): return self.name == 'uint8' def is_uint16(self): return self.name == 'uint16' def is_uint32(self): return self.name == 'uint32' def is_uint64(self): return self.name == 'uint64' def is_floating(self): return self.name in dtype.FP_TYPES def is_standard_floating(self): 
return self.name in dtype.STANDARD_FP_TYPES def is_int_signed(self): return self.name in dtype.SINT_TYPES def is_int_unsigned(self): return self.name in dtype.UINT_TYPES def is_int(self): return self.name in dtype.SINT_TYPES + dtype.UINT_TYPES def is_bool(self): return self.is_int1() @staticmethod def is_void(): raise RuntimeError("Not implemented") @staticmethod def is_block(): return False @staticmethod def is_ptr(): return False def __eq__(self, other: dtype): if not isinstance(other, dtype): return False return self.name == other.name def __ne__(self, other: dtype): return not self.__eq__(other) def __hash__(self): return hash((self.name,)) @property def scalar(self): return self def to_ir(self, builder: ir.builder) -> ir.type: if self.name == 'void': return builder.get_void_ty() elif self.name == 'int1': return builder.get_int1_ty() elif self.name in ('int8', 'uint8'): return builder.get_int8_ty() elif self.name in ('int16', 'uint16'): return builder.get_int16_ty() elif self.name in ('int32', 'uint32'): return builder.get_int32_ty() elif self.name in ('int64', 'uint64'): return builder.get_int64_ty() elif self.name == 'fp8e5': return builder.get_fp8e5_ty() elif self.name == 'fp8e4': return builder.get_fp8e4_ty() elif self.name == 'fp16': return builder.get_half_ty() elif self.name == 'bf16': return builder.get_bf16_ty() elif self.name == 'fp32': return builder.get_float_ty() elif self.name == 'fp64': return builder.get_double_ty() raise ValueError(f'fail to convert {self} to ir type') def __str__(self): return self.name @property def cache_key_part(self) -> str: """See cache_key_part() in triton.cc.""" return self.name def __repr__(self): return f'triton.language.{self.name}' class pointer_type(dtype): def __init__(self, element_ty: dtype, address_space: int = 1): if not isinstance(element_ty, dtype): raise TypeError('element_ty is a {type(element_ty).__name__}.') self.element_ty = element_ty self.address_space = address_space self.name = self.__str__() def 
to_ir(self, builder: ir.builder) -> ir.pointer_type: return builder.get_ptr_ty(self.element_ty.to_ir(builder), 1) def __str__(self): return f'pointer<{self.element_ty}>' def __repr__(self): return self.__str__() def is_ptr(self): return True def __eq__(self, other: pointer_type) -> bool: if not isinstance(other, pointer_type): return False return self.element_ty == other.element_ty and self.address_space == other.address_space def __ne__(self, other: pointer_type) -> bool: return not self.__eq__(other) @property def scalar(self): return self class block_type(dtype): def __init__(self, element_ty: dtype, shape: List): self.element_ty = element_ty # Note that block_type's shape is a list of int # while tensor's shape is a list of constexpr. # shape can be empty ([]) when an input is a 0D tensor. if not shape: raise TypeError('0d block_type is forbidden') if isinstance(shape[0], constexpr): shape = [s.value for s in shape] self.shape = shape self.numel = 1 for s in self.shape: self.numel *= s if self.numel > TRITON_MAX_TENSOR_NUMEL: raise ValueError(f"numel ({self.numel}) exceeds triton maximum tensor numel ({TRITON_MAX_TENSOR_NUMEL})") self.name = self.__str__() def to_ir(self, builder: ir.builder) -> ir.block_type: return builder.get_block_ty(self.element_ty.to_ir(builder), self.shape) def __str__(self): return f'<{self.shape}, {self.element_ty}>' def __repr__(self): return self.__str__() def is_block(self): return True def get_block_shapes(self) -> List[int]: return self.shape def __eq__(self, other: block_type) -> bool: if not isinstance(other, block_type): return False return self.element_ty == other.element_ty and self.shape == other.shape def __ne__(self, other: block_type) -> bool: return not self.__eq__(other) @property def scalar(self): return self.element_ty class function_type(dtype): def __init__(self, ret_types: List[dtype], param_types: List[dtype]) -> None: self.ret_types = ret_types self.param_types = param_types def __str__(self): return f'fn 
({self.param_types}) -> {self.ret_types}' def to_ir(self, builder: ir.builder): ir_param_types = [ty.to_ir(builder) for ty in self.param_types] ret_types = [ret_type.to_ir(builder) for ret_type in self.ret_types] return builder.get_function_ty(ir_param_types, ret_types) # scalar types void = dtype('void') int1 = dtype('int1') int8 = dtype('int8') int16 = dtype('int16') int32 = dtype('int32') int64 = dtype('int64') uint8 = dtype('uint8') uint16 = dtype('uint16') uint32 = dtype('uint32') uint64 = dtype('uint64') float8e5 = dtype('fp8e5') float8e4 = dtype('fp8e4') float16 = dtype('fp16') bfloat16 = dtype('bf16') float32 = dtype('fp32') float64 = dtype('fp64') # pointer types pi32_t = pointer_type(int32) # ----------------------- # constexpr # ----------------------- class constexpr: """ This class is used to store a value that is known at compile-time. """ def __init__(self, value): if isinstance(value, constexpr): self.value = value.value else: self.value = value def __repr__(self) -> str: return f"constexpr[{self.value}]" def __add__(self, other): return constexpr(self.value + other.value) def __radd__(self, other): return constexpr(other.value + self.value) def __sub__(self, other): return constexpr(self.value - other.value) def __rsub__(self, other): return constexpr(other.value - self.value) def __mul__(self, other): return constexpr(self.value * other.value) def __mod__(self, other): return constexpr(self.value % other.value) def __rmul__(self, other): return constexpr(other.value * self.value) def __truediv__(self, other): return constexpr(self.value / other.value) def __rtruediv__(self, other): return constexpr(other.value / self.value) def __floordiv__(self, other): return constexpr(self.value // other.value) def __rfloordiv__(self, other): return constexpr(other.value // self.value) def __gt__(self, other): return constexpr(self.value > other.value) def __rgt__(self, other): return constexpr(other.value > self.value) def __ge__(self, other): return 
constexpr(self.value >= other.value) def __rge__(self, other): return constexpr(other.value >= self.value) def __lt__(self, other): return constexpr(self.value < other.value) def __rlt__(self, other): return constexpr(other.value < self.value) def __le__(self, other): return constexpr(self.value <= other.value) def __rle__(self, other): return constexpr(other.value <= self.value) def __eq__(self, other): return constexpr(self.value == other.value) def __ne__(self, other): return constexpr(self.value != other.value) def __bool__(self): return bool(self.value) def __neg__(self): return constexpr(-self.value) def __and__(self, other): return constexpr(self.value & other.value) def logical_and(self, other): return constexpr(self.value and other.value) def __or__(self, other): return constexpr(self.value | other.value) def __xor__(self, other): return constexpr(self.value ^ other.value) def logical_or(self, other): return constexpr(self.value or other.value) def __pos__(self): return constexpr(+self.value) def __invert__(self): return constexpr(~self.value) def __pow__(self, other): return constexpr(self.value ** other.value) def __rshift__(self, other): return constexpr(self.value >> other.value) def __lshift__(self, other): return constexpr(self.value << other.value) def __not__(self): return constexpr(not self.value) def __call__(self, *args, **kwds): return self.value(*args, **kwds) class tensor: def __init__(self, handle, type: dtype): # IR handle self.handle = handle # Block shape self.shape = (1, ) if type.is_block(): self.shape = type.shape self.numel = 1 for s in self.shape: self.numel *= s self.numel = constexpr(self.numel) self.type = type # Tensor type (can be block_type) # Following the practice in pytorch, dtype is scalar type self.dtype = type.scalar self.shape = [constexpr(s) for s in self.shape] def __str__(self) -> str: # ex. 
"float32[3,4]" return str(self.dtype) + '[' + ','.join(str(s) for s in self.shape) + ']' @builtin def __add__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.add(self, other, _builder) def __radd__(self, other, _builder=None): return self.__add__(other, _builder=_builder) @builtin def __sub__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.sub(self, other, _builder) def __rsub__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.sub(other, self, _builder) @builtin def __mul__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.mul(self, other, _builder) def __rmul__(self, other, _builder=None): return self.__mul__(other, _builder=_builder) @builtin def __truediv__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.truediv(self, other, _builder) def __rtruediv__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.truediv(other, self, _builder) @builtin def __floordiv__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.floordiv(self, other, _builder) @builtin def __rfloordiv__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.floordiv(other, self, _builder) @builtin def __mod__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.mod(self, other, _builder) @builtin def __rmod__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.mod(other, self, _builder) # unary operators @builtin def __neg__(self, _builder=None): return semantic.minus(self, _builder) @builtin def __invert__(self, _builder=None): return semantic.invert(self, _builder) # bitwise operators @builtin def __and__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.and_(self, other, _builder) @builtin def __rand__(self, other, _builder=None): other = _to_tensor(other, _builder) 
return semantic.and_(other, self, _builder) @builtin def __or__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.or_(self, other, _builder) @builtin def __ror__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.or_(other, self, _builder) @builtin def __xor__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.xor_(self, other, _builder) @builtin def __rxor__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.xor_(other, self, _builder) @builtin def __lshift__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.shl(self, other, _builder) @builtin def __rlshift__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.shl(other, self, _builder) @builtin def __rshift__(self, other, _builder=None): other = _to_tensor(other, _builder) if self.dtype.is_int_signed(): return semantic.ashr(self, other, _builder) else: return semantic.lshr(self, other, _builder) @builtin def __rrshift__(self, other, _builder=None): other = _to_tensor(other, _builder) if self.dtype.is_int_signed(): return semantic.ashr(other, self, _builder) else: return semantic.lshr(other, self, _builder) # comparison operators # > @builtin def __gt__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.greater_than(self, other, _builder) @builtin def __rgt__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.greater_than(other, self, _builder) # >= @builtin def __ge__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.greater_equal(self, other, _builder) @builtin def __rge__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.greater_equal(other, self, _builder) # < @builtin def __lt__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.less_than(self, other, _builder) @builtin def 
__rlt__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.less_than(other, self, _builder) # <= @builtin def __le__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.less_equal(self, other, _builder) @builtin def __rle__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.less_equal(other, self, _builder) # == @builtin def __eq__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.equal(self, other, _builder) @builtin def __ne__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.not_equal(self, other, _builder) @builtin def logical_and(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.logical_and(self, other, _builder) @builtin def logical_or(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.logical_or(self, other, _builder) # note: __not__ isn't actually a magic method in python # but it's ok because our ASTVisitor handles it @builtin def __not__(self, _builder=None): return semantic.not_(self, _builder) @builtin def __getitem__(self, slices, _builder=None): if isinstance(slices, slice): slices = [slices] ret = self for dim, sl in enumerate(slices): if isinstance(sl, constexpr) and sl.value is None: ret = semantic.expand_dims(ret, dim, _builder) elif sl == slice(None, None, None): pass else: assert False, f"unsupported tensor index: {sl}" return ret @property def T(self): assert False, "Transposition must be created by the AST Visitor" @builtin def to(self, dtype, bitcast=False, _builder=None): if isinstance(bitcast, constexpr): bitcast = bitcast.value if bitcast: return semantic.bitcast(self, dtype, _builder) return semantic.cast(self, dtype, _builder) # ----------------------- # SPMD Programming Model # ----------------------- def _constexpr_to_value(v): if isinstance(v, constexpr): return v.value return v @builtin def program_id(axis, _builder=None): 
""" Returns the id of the current program instance along the given :code:`axis`. :param axis: The axis of the 3D launch grid. Has to be either 0, 1 or 2. :type axis: int """ # if axis == -1: # pid0 = program_id(0, _builder) # pid1 = program_id(1, _builder) # pid2 = program_id(2, _builder) # npg0 = num_programs(0, _builder) # npg1 = num_programs(0, _builder) # return pid0 + pid1*npg0 + pid2*npg0*npg1 axis = _constexpr_to_value(axis) return semantic.program_id(axis, _builder) @builtin def num_programs(axis, _builder=None): """ Returns the number of program instances launched along the given :code:`axis`. :param axis: The axis of the 3D launch grid. Has to be either 0, 1 or 2. :type axis: int """ axis = _constexpr_to_value(axis) return semantic.num_programs(axis, _builder) # ----------------------- # Block Initialization # ----------------------- @builtin def arange(start, end, _builder=None): """ Returns contiguous values within the left-closed and right-open interval [:code:`start`, :code:`end`). \ End - Start must be less than or equal to TRITON_MAX_TENSOR_NUMEL = 131072 :param start: Start of the interval. Must be a power of two. :type start: int32 :param end: End of the interval. Must be a power of two > start. :type end: int32 """ start = _constexpr_to_value(start) end = _constexpr_to_value(end) return semantic.arange(start, end, _builder) def _shape_check_impl(shape): shape = _constexpr_to_value(shape) for i, d in enumerate(shape): if not isinstance(d, constexpr): raise TypeError(f"Shape element {i} must have type `constexpr`") if not isinstance(d.value, int): raise TypeError(f"Shape element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]") return [_constexpr_to_value(x) for x in shape] @builtin def full(shape, value, dtype, _builder=None): """ Returns a tensor filled with the scalar value for the given :code:`shape` and :code:`dtype`. 
:param shape: Shape of the new array, e.g., (8, 16) or (8, ) :value value: A scalar value to fill the array with :type shape: tuple of ints :param dtype: Data-type of the new array, e.g., :code:`tl.float16` :type dtype: DType """ shape = _shape_check_impl(shape) value = _constexpr_to_value(value) dtype = _constexpr_to_value(dtype) return semantic.full(shape, value, dtype, _builder) # ----------------------- # Shape Manipulation # ----------------------- @builtin def broadcast(input, other, _builder=None): """ Tries to broadcast the two given blocks to a common compatible shape. :param input: The first input tensor. :type input: Block :param other: The second input tensor. :type other: Block """ return semantic.broadcast_impl_value(input, other, _builder) @builtin def broadcast_to(input, shape, _builder=None): """ Tries to broadcast the given tensor to a new :code:`shape`. :param input: The input tensor. :type input: Block :param shape: The desired shape. :type shape: Tuple[int] """ shape = _shape_check_impl(shape) return semantic.broadcast_impl_shape(input, shape, _builder) @builtin def trans(input, _builder=None): return semantic.trans(input, _builder) @builtin def cat(input, other, can_reorder=False, _builder=None): """ Concatenate the given blocks :param input: The first input tensor. :type input: :param other: The second input tensor. :type other: :param reorder: Compiler hint. If true, the compiler is allowed to reorder elements while concatenating inputs. Only use if the order does not matter (e.g., result is only used in reduction ops) """ return semantic.cat(input, other, can_reorder, _builder) @builtin def view(input, shape, _builder=None): """ Returns a tensor with the same elements as `input` but a different shape. The order of the elements may not be preserved. :param input: The input tensor. :type input: :param shape: The desired shape. 
:type shape: Tuple[int] """ shape = _shape_check_impl(shape) return semantic.view(input, shape, _builder) @builtin def reshape(input, shape, _builder=None): shape = _shape_check_impl(shape) return semantic.reshape(input, shape, _builder) def _wrap_axis(axis, ndim): if not (-ndim <= axis < ndim): raise ValueError(f"invalid axis {axis}. Expected {-ndim} <= axis < {ndim}") return axis if axis >= 0 else axis + ndim @builtin def expand_dims(input, axis, _builder=None): """ Expand the shape of a tensor, by inserting new length-1 dimensions. Axis indices are with respect to the resulting tensor, so ``result.shape[axis]`` will be 1 for each axis. :param input: The input tensor. :type input: tl.tensor :param axis: The indices to add new axes :type axis: int | Sequence[int] """ axis = _constexpr_to_value(axis) axes = list(axis) if isinstance(axis, Sequence) else [axis] new_ndim = len(input.shape) + len(axes) axes = [_wrap_axis(_constexpr_to_value(d), new_ndim) for d in axes] if len(set(axes)) != len(axes): raise ValueError(f"expand_dims recieved duplicate axes, normalized axes = {axes}") ret = input for a in sorted(axes): ret = semantic.expand_dims(ret, a, _builder) return ret # ----------------------- # Linear Algebra # ----------------------- @builtin def dot(input, other, allow_tf32=True, out_dtype=float32, _builder=None): """ Returns the matrix product of two blocks. The two blocks must be two-dimensional and have compatible inner dimensions. :param input: The first tensor to be multiplied. :type input: 2D tensor of scalar-type in {:code:`float16`, :code:`bfloat16`, :code:`float32`} :param other: The second tensor to be multiplied. 
:type other: 2D tensor of scalar-type in {:code:`float16`, :code:`bfloat16`, :code:`float32`} """ allow_tf32 = _constexpr_to_value(allow_tf32) out_dtype = _constexpr_to_value(out_dtype) return semantic.dot(input, other, allow_tf32, out_dtype, _builder) # ----------------------- # Non-Atomic Memory Operations # ----------------------- @builtin def load(pointer, mask=None, other=None, boundary_check=tuple(), padding_option="", cache_modifier="", eviction_policy="", volatile=False, _builder=None): """ Return a tensor of data whose values are loaded from memory at location defined by `pointer`: (1) `pointer` could be a single element pointer, then a scalar will be loaded - `mask` and `other` must be scalar too - `other` is implicitly typecast to `pointer.dtype.element_ty` - `boundary_check` and `padding_option` must be empty (2) `pointer` could be element-wise tensor of pointers, in which case: - `mask` and `other` are implicitly broadcast to `pointer.shape` - `other` is implicitly typecast to `pointer.dtype.element_ty` - `boundary_check` and `padding_option` must be empty (3) `pointer` could be a block pointer defined by `make_block_ptr`, in which case: - `mask` and `other` must be None - `boundary_check` and `padding_option` can be specified to control the behavior of out-of-bound access :param pointer: Pointer to the data to be loaded :type pointer: `triton.PointerType`, or block of `dtype=triton.PointerType` :param mask: if `mask[idx]` is false, do not load the data at address `pointer[idx]` (must be `None` with block pointers) :type mask: Block of `triton.int1`, optional :param other: if `mask[idx]` is false, return `other[idx]` :type other: Block, optional :param boundary_check: tuple of integers, indicating the dimensions which should do the boundary check :type boundary_check: tuple of ints, optional :param padding_option: should be one of {"", "zero", "nan"}, do padding while out of bound :param cache_modifier: changes cache option in NVIDIA PTX :type 
cache_modifier: str, optional :param eviction_policy: changes eviction policy in NVIDIA PTX :type eviction_policy: str, optional :param volatile: changes volatile option in NVIDIA PTX :type volatile: bool, optional """ # `mask` and `other` can be constexpr if _constexpr_to_value(mask) is not None: mask = _to_tensor(mask, _builder) if _constexpr_to_value(other) is not None: other = _to_tensor(other, _builder) padding_option = _constexpr_to_value(padding_option) cache_modifier = _constexpr_to_value(cache_modifier) eviction_policy = _constexpr_to_value(eviction_policy) volatile = _constexpr_to_value(volatile) return semantic.load(pointer, mask, other, boundary_check, padding_option, cache_modifier, eviction_policy, volatile, _builder) @builtin def store(pointer, value, mask=None, boundary_check=(), cache_modifier="", eviction_policy="", _builder=None): """ Store a tensor of data into memory locations defined by `pointer`: (1) `pointer` could be a single element pointer, then a scalar will be stored - `mask` must be scalar too - `boundary_check` and `padding_option` must be empty (2) `pointer` could be element-wise tensor of pointers, in which case: - `mask` is implicitly broadcast to `pointer.shape` - `boundary_check` must be empty (3) or `pointer` could be a block pointer defined by `make_block_ptr`, in which case: - `mask` must be None - `boundary_check` can be specified to control the behavior of out-of-bound access `value` is implicitly broadcast to `pointer.shape` and typecast to `pointer.dtype.element_ty`. 
:param pointer: The memory location where the elements of `value` are stored :type pointer: `triton.PointerType`, or block of `dtype=triton.PointerType` :param value: The tensor of elements to be stored :type value: Block :param mask: If `mask[idx]` is false, do not store `value[idx]` at `pointer[idx]` :type mask: Block of triton.int1, optional :param boundary_check: tuple of integers, indicating the dimensions which should do the boundary check :type boundary_check: tuple of ints, optional :param cache_modifier: changes cache option in NVIDIA PTX :type cache_modifier: str, optional :param eviction_policy: changes eviction policy in NVIDIA PTX :type eviction_policy: str, optional """ # `value` can be constexpr value = _to_tensor(value, _builder) if _constexpr_to_value(mask) is not None: mask = _to_tensor(mask, _builder) cache_modifier = _constexpr_to_value(cache_modifier) eviction_policy = _constexpr_to_value(eviction_policy) return semantic.store(pointer, value, mask, boundary_check, cache_modifier, eviction_policy, _builder) @builtin def make_block_ptr(base: tensor, shape, strides, offsets, block_shape, order, _builder=None): """ Returns a pointer to a block in a parent tensor :param base: The base pointer to the parent tensor :param shape: The shape of the parent tensor :param strides: The strides of the parent tensor :param offsets: The offsets to the block :param block_shape: The shape of the block :param order: The order of the original data format """ return semantic.make_block_ptr(base, shape, strides, offsets, block_shape, order, _builder) @builtin def advance(base: tensor, offsets, _builder=None): """ Advance a block pointer :param base: the block pointer to advance :param offsets: the offsets to advance, a tuple by dimension """ return semantic.advance(base, offsets, _builder) # ----------------------- # Atomic Memory Operations # ----------------------- def _add_atomic_docstr(name: str) -> Callable[[T], T]: def _decorator(func: T) -> T: docstr = """ 
Performs an atomic {name} at the memory location specified by :code:`pointer`. Return the data stored at :code:`pointer` before the atomic operation. :param pointer: The memory locations to compare-and-swap. :type pointer: Block of dtype=triton.PointerDType :param cmp: The values expected to be found in the atomic object :type cmp: Block of dtype=`pointer.dtype.element_ty` :param val: The values to copy in case the expected value matches the contained value. :type val: Block of dtype=`pointer.dtype.element_ty` """ func.__doc__ = docstr.format(name=name) return func return _decorator @builtin @_add_atomic_docstr("compare-and-swap") def atomic_cas(pointer, cmp, val, _builder=None): cmp = _to_tensor(cmp, _builder) val = _to_tensor(val, _builder) return semantic.atomic_cas(pointer, cmp, val, _builder) @builtin @_add_atomic_docstr("exchange") def atomic_xchg(pointer, val, mask=None, _builder=None): val = _to_tensor(val, _builder) return semantic.atomic_xchg(pointer, val, mask, _builder) @builtin @_add_atomic_docstr("add") def atomic_add(pointer, val, mask=None, _builder=None): val = _to_tensor(val, _builder) return semantic.atomic_add(pointer, val, mask, _builder) @builtin @_add_atomic_docstr("max") def atomic_max(pointer, val, mask=None, _builder=None): val = _to_tensor(val, _builder) return semantic.atomic_max(pointer, val, mask, _builder) @builtin @_add_atomic_docstr("min") def atomic_min(pointer, val, mask=None, _builder=None): val = _to_tensor(val, _builder) return semantic.atomic_min(pointer, val, mask, _builder) @builtin @_add_atomic_docstr("logical and") def atomic_and(pointer, val, mask=None, _builder=None): val = _to_tensor(val, _builder) return semantic.atomic_and(pointer, val, mask, _builder) @builtin @_add_atomic_docstr("logical or") def atomic_or(pointer, val, mask=None, _builder=None): val = _to_tensor(val, _builder) return semantic.atomic_or(pointer, val, mask, _builder) @builtin @_add_atomic_docstr("logical xor") def atomic_xor(pointer, val, mask=None, 
_builder=None): val = _to_tensor(val, _builder) return semantic.atomic_xor(pointer, val, mask, _builder) # ----------------------- # Conditioning # ----------------------- @builtin def where(condition, x, y, _builder=None): """ Returns a tensor of elements from either :code:`x` or :code:`y`, depending on :code:`condition`. Note that :code:`x` and :code:`y` are always evaluated regardless of the value of :code:`condition`. If you want to avoid unintended memory operations, use the :code:`mask` arguments in `triton.load` and `triton.store` instead. The shape of :code:`x` and :code:`y` are both broadcast to the shape of :code:`condition`. :code:`x` and :code:`y` must have the same data type. :param condition: When True (nonzero), yield x, otherwise yield y. :type condition: Block of triton.bool :param x: values selected at indices where condition is True. :param y: values selected at indices where condition is False. """ condition = _to_tensor(condition, _builder) x = _to_tensor(x, _builder) y = _to_tensor(y, _builder) return semantic.where(condition, x, y, _builder) # ----------------------- # Math # ----------------------- @builtin def umulhi(x, y, _builder=None): x = _to_tensor(x, _builder) y = _to_tensor(y, _builder) return semantic.umulhi(x, y, _builder) @builtin def fdiv(x, y, ieee_rounding=False, _builder=None): ieee_rounding = _constexpr_to_value(ieee_rounding) return semantic.fdiv(x, y, ieee_rounding, _builder) def _add_math_1arg_docstr(name: str) -> Callable[[T], T]: def _decorator(func: T) -> T: docstr = """ Computes the element-wise {name} of :code:`x`. 
:param x: the input values :type x: Block """ func.__doc__ = docstr.format(name=name) return func return _decorator @builtin @_add_math_1arg_docstr("exponential") def exp(x, _builder=None): return semantic.exp(x, _builder) @builtin @_add_math_1arg_docstr("natural logarithm") def log(x, _builder=None): return semantic.log(x, _builder) @builtin @_add_math_1arg_docstr("cosine") def cos(x, _builder=None): return semantic.cos(x, _builder) @builtin @_add_math_1arg_docstr("sine") def sin(x, _builder=None): return semantic.sin(x, _builder) @builtin @_add_math_1arg_docstr("square root") def sqrt(x, _builder=None): return semantic.sqrt(x, _builder) @builtin @_add_math_1arg_docstr("absolute value") def abs(x, _builder=None): return semantic.abs(x, _builder) # ----------------------- # Reductions # ----------------------- def _add_reduction_docstr(name: str) -> Callable[[T], T]: def _decorator(func: T) -> T: docstr = """ Returns the {name} of all elements in the :code:`input` tensor along the provided :code:`axis` :param input: the input values :param axis: the dimension along which the reduction should be done """ func.__doc__ = docstr.format(name=name) return func return _decorator @contextmanager def _insertion_guard(builder): ip = builder.get_insertion_point() yield builder.restore_insertion_point(ip) @builtin def reduce(input, axis, combine_fn, _builder=None, _generator=None): """Applies the combine_fn to all elements in :code:`input` tensors along the provided :code:`axis` :param input: the input tensor, or tuple of tensors :param axis: the dimension along which the reduction should be done :param combine_fn: a function to combine two groups of scalar tensors (must be marked with @triton.jit) """ if isinstance(input, tensor): return reduce((input,), axis, combine_fn, _builder=_builder, _generator=_generator)[0] def make_combine_region(reduce_op): in_scalar_tys = [t.type.scalar for t in input] prototype = function_type(in_scalar_tys, in_scalar_tys * 2) region = 
reduce_op.get_region(0) with _insertion_guard(_builder): param_types = [ty.to_ir(_builder) for ty in prototype.param_types] block = _builder.create_block_with_parent(region, param_types) args = [tensor(block.arg(i), ty) for i, ty in enumerate(prototype.param_types)] results = _generator.call_JitFunction(combine_fn, args, kwargs={}) if isinstance(results, tensor): handles = [results.handle] else: handles = [r.handle for r in results] _builder.create_reduce_ret(*handles) axis = _constexpr_to_value(axis) return semantic.reduction(input, axis, make_combine_region, _builder) @builtin def _promote_reduction_input(t, _builder=None): scalar_ty = t.type.scalar # input is extended to 32-bits if necessary # this increases numerical accuracy and can be done pretty much for free # on GPUs if scalar_ty.is_int() and scalar_ty.int_bitwidth < 32: return t.to(int32, _builder=_builder) # hardware doesn't support FMAX, FMIN, CMP for bfloat16 if scalar_ty is bfloat16: return t.to(float32, _builder=_builder) return t @builtin def _argreduce(input, axis, combine_fn, _builder=None, _generator=None): axis = _constexpr_to_value(axis) n = input.shape[axis] index = arange(0, n, _builder=_builder) if len(input.shape) > 1: # Broadcast index across the non-reduced axes axes_to_expand = [constexpr(d) for d in range(len(input.shape))] del axes_to_expand[axis] index = expand_dims(index, axes_to_expand, _builder=_builder) index = broadcast_to(index, input.shape, _builder=_builder) rvalue, rindices = reduce((input, index), axis, combine_fn, _builder=_builder, _generator=_generator) return rindices @triton.jit def minimum(x, y): """ Computes the element-wise minimum of :code:`x` and :code:`y`. :param input: the first input tensor :type input: Block :param other: the second input tensor :type other: Block """ return where(x < y, x, y) @triton.jit def maximum(x, y): """ Computes the element-wise maximum of :code:`x` and :code:`y`. 
:param input: the first input tensor :type input: Block :param other: the second input tensor :type other: Block """ return where(x > y, x, y) @triton.jit def _max_combine(a, b): return maximum(a, b) @triton.jit @_add_reduction_docstr("maximum") def max(input, axis): input = _promote_reduction_input(input) return reduce(input, axis, _max_combine) @triton.jit def _argmax_combine(value1, index1, value2, index2): gt = value1 > value2 lt = value1 < value2 index_min = minimum(index1, index2) index_ret = where(gt, index1, where(lt, index2, index_min)) value_ret = maximum(value1, value2) return value_ret, index_ret @triton.jit @_add_reduction_docstr("maximum index") def argmax(input, axis): input = _promote_reduction_input(input) return _argreduce(input, axis, _argmax_combine) @triton.jit def _min_combine(a, b): # TODO: minimum/maximum doesn't get lowered to fmin/fmax... return minimum(a, b) @triton.jit @_add_reduction_docstr("minimum") def min(input, axis): input = _promote_reduction_input(input) return reduce(input, axis, _min_combine) @triton.jit def _argmin_combine(value1, index1, value2, index2): lt = value1 < value2 gt = value1 > value2 index_min = minimum(index1, index2) index_ret = where(lt, index1, where(gt, index2, index_min)) value_ret = minimum(value1, value2) return value_ret, index_ret @triton.jit @_add_reduction_docstr("minimum index") def argmin(input, axis): input = _promote_reduction_input(input) return _argreduce(input, axis, _argmin_combine) @triton.jit def _sum_combine(a, b): return a + b @triton.jit @_add_reduction_docstr("sum") def sum(input, axis): input = _promote_reduction_input(input) return reduce(input, axis, _sum_combine) @triton.jit def _xor_combine(a, b): return a ^ b @builtin @_add_reduction_docstr("xor sum") def xor_sum(input, axis, _builder=None, _generator=None): scalar_ty = input.type.scalar if not scalar_ty.is_int(): raise ValueError("xor_sum only supported for integers") input = _promote_reduction_input(input, _builder=_builder) 
return reduce(input, axis, _xor_combine, _builder=_builder, _generator=_generator) # ----------------------- # Internal for debugging # ----------------------- @builtin def debug_barrier(_builder=None): return semantic.debug_barrier(_builder) @builtin def multiple_of(input, values, _builder=None): """ Let the compiler knows that the values in :code:`input` are all multiples of :code:`value`. """ if isinstance(values, constexpr): values = [values] for i, d in enumerate(values): if not isinstance(d, constexpr): raise TypeError(f"values element {i} must have type `constexpr`") if not isinstance(d.value, int): raise TypeError(f"values element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]") values = [x.value for x in values] return semantic.multiple_of(input, values) @builtin def max_contiguous(input, values, _builder=None): """ Let the compiler knows that the `value` first values in :code:`input` are contiguous. """ if isinstance(values, constexpr): values = [values] for i, d in enumerate(values): if not isinstance(d, constexpr): raise TypeError(f"values element {i} must have type `constexpr`") if not isinstance(d.value, int): raise TypeError(f"values element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]") values = [x.value for x in values] return semantic.max_contiguous(input, values) # ----------------------- # Debugging functions # ----------------------- @builtin def static_print(*values, sep: str = " ", end: str = "\n", file=None, flush=False, _builder=None): pass @builtin def static_assert(cond, msg="", _builder=None): pass @builtin def device_print(prefix, *args, _builder=None): import string prefix = _constexpr_to_value(prefix) assert isinstance(prefix, str), f"{prefix} is not string" b_ascii = True for ch in prefix: if ch not in string.printable: b_ascii = False break assert b_ascii, f"{prefix} is not an ascii string" new_args = [] for arg in args: new_args.append(_to_tensor(arg, _builder)) return 
semantic.device_print(prefix, new_args, _builder) @builtin def device_assert(cond, msg="", _builder=None): msg = _constexpr_to_value(msg) import inspect frame = inspect.currentframe() module = inspect.getmodule(frame) # The triton function module doesn't have the name attribute. # We use this trick to find the caller. while hasattr(module, "__name__"): frame = frame.f_back module = inspect.getmodule(frame) func_name = frame.f_code.co_name file_name = frame.f_back.f_code.co_filename # TODO: The line number currently indicates the line # where the triton function is called but not where the # device_assert is called. Need to enhance this. lineno = frame.f_back.f_lineno return semantic.device_assert(_to_tensor(cond, _builder), msg, file_name, func_name, lineno, _builder) # ----------------------- # Iterators # ----------------------- class static_range: """Iterator that counts upward forever.""" def __init__(self, arg1, arg2=None, step=None): assert isinstance(arg1, constexpr) if step is None: self.step = constexpr(1) else: assert isinstance(step, constexpr) self.step = step if arg2 is None: self.start = constexpr(0) self.end = arg1 else: assert isinstance(arg2, constexpr) self.start = arg1 self.end = arg2 def __iter__(self): raise RuntimeError("static_range can only be used in @triton.jit'd functions") def __next__(self): raise RuntimeError("static_range can only be used in @triton.jit'd functions") # ----------------------- # Extern functions # ----------------------- def dispatch(func, lib_name: str, lib_path: str, args: list, arg_type_symbol_dict: dict, ret_shape: tuple, is_pure: bool, _builder=None): ''' Dispatch a function to a library :param func: the function to dispatch :param lib_name: the name of the library :param lib_path: the path of the library :param args: the arguments of the function :param arg_type_symbol_dict: the type of the arguments :param ret_shape: the shape of the return value :param _builder: the builder :return: the return value of the 
function ''' if len(arg_type_symbol_dict) == 0: raise ValueError("arg_type_symbol_dict is empty") num_args = len(list(arg_type_symbol_dict.keys())[0]) if len(args) != num_args: raise ValueError(f"length of input args does not match." f"Expect {len(args)}, got {num_args}") arg_types = [] arg_list = [] for arg in args: if isinstance(arg, tensor): arg_types.append(arg.dtype) arg_list.append(arg.handle) else: arg_types.append(type(arg)) arg_list.append(arg) arg_types = tuple(arg_types) if arg_types not in arg_type_symbol_dict: raise ValueError(f"input arg type does not match." f"Expect one of {arg_type_symbol_dict.keys()}, got {arg_types}") else: symbol = arg_type_symbol_dict[arg_types][0] ret_type = arg_type_symbol_dict[arg_types][1] if ret_shape: ret_type = block_type(ret_type, ret_shape) return tensor(func(lib_name, lib_path, symbol, arg_list, ret_type.to_ir(_builder), is_pure), ret_type) def extern_elementwise(lib_name: str, lib_path: str, args: list, arg_type_symbol_dict: dict, is_pure: bool, _builder=None): ''' Dispatch an elementwise function to a library :param lib_name: the name of the library :param lib_path: the path of the library :param args: the arguments of the function :param arg_type_symbol_dict: the type of the arguments :param is_pure: whether the function is pure :param _builder: the builder :return: the return value of the function ''' dispatch_args = args.copy() all_scalar = True ret_shape = None arg_types = [] for i in range(len(dispatch_args)): dispatch_args[i] = _to_tensor(dispatch_args[i], _builder) arg_types.append(dispatch_args[i].dtype) if dispatch_args[i].type.is_block(): all_scalar = False if len(arg_types) > 0: arg_types = tuple(arg_types) arithmetic_check = True # If there's a type tuple that is not supported by the library, we will do arithmetic check if arg_types in arg_type_symbol_dict: arithmetic_check = False broadcast_arg = dispatch_args[0] # Get the broadcast shape over all the arguments for i, item in enumerate(dispatch_args): 
_, broadcast_arg = semantic.binary_op_type_checking_impl( item, broadcast_arg, _builder, arithmetic_check=arithmetic_check) # Change the shape of each argument based on the broadcast shape for i in range(len(dispatch_args)): dispatch_args[i], _ = semantic.binary_op_type_checking_impl( dispatch_args[i], broadcast_arg, _builder, arithmetic_check=arithmetic_check) if not all_scalar: ret_shape = broadcast_arg.shape func = getattr(_builder, "create_extern_elementwise") return dispatch(func, lib_name, lib_path, dispatch_args, arg_type_symbol_dict, ret_shape, is_pure, _builder) def extern(fn): """A decorator for external functions.""" return builtin(fn)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,448
quantapix/qnarre
refs/heads/main
/qnarre/models/megatron.py
# Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import math import torch import torch.utils.checkpoint from torch import nn from torch.nn import functional as F from transformers.utils import logging from .. import core as qc from ..core import utils as qu from ..core import forward as qf from ..core import output as qo from ..core import attention as qa from ..core.embed import Embed from ..core.mlp import Classifier, MLP, Predictor, Pool from ..prep.config.megatron import PreTrained from torch.nn import CrossEntropyLoss from ...pytorch_utils import ( apply_chunking_to_forward, ) log = logging.get_logger(__name__) LIST = [ "nvidia/megatron-bert-cased-345m", ] class MegatronBertEmbeddings(qc.Module): def __init__(self, config): super().__init__() self.word_embeddings = qc.Embed(config.s_vocab, config.d_model, padding_idx=config.PAD) self.position_embeddings = qc.Embed(config.n_pos, config.d_model) self.token_type_embeddings = qc.Embed(config.n_typ, config.d_model) self.drop = qc.Dropout(config.drop) self.register_buffer("position_ids", torch.arange(config.n_pos).expand((1, -1))) self.pos_type = getattr(config, "pos_type", "absolute") def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0, ): if input_ids is not None: input_shape = input_ids.size() else: input_shape = 
inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[ :, past_key_values_length : seq_length + past_key_values_length ] if token_type_ids is None: token_type_ids = torch.zeros( input_shape, dtype=torch.long, device=self.position_ids.device ) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.pos_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.drop(embeddings) return embeddings # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->MegatronBert class MegatronBertSelfAttention(qc.Module): def __init__(self, config, pos_type=None): super().__init__() if config.d_model % config.n_heads != 0 and not hasattr(config, "d_embed"): raise ValueError( f"The hidden size ({config.d_model}) is not a multiple of the number of attention " f"heads ({config.n_heads})" ) self.n_heads = config.n_heads self.attention_head_size = int(config.d_model / config.n_heads) self.all_head_size = self.n_heads * self.attention_head_size self.query = qc.Linear(config.d_model, self.all_head_size) self.key = qc.Linear(config.d_model, self.all_head_size) self.value = qc.Linear(config.d_model, self.all_head_size) self.drop = qc.Dropout(config.drop_attn) self.pos_type = pos_type or getattr(config, "pos_type", "absolute") if self.pos_type == "relative_key" or self.pos_type == "relative_key_query": self.n_pos = config.n_pos self.distance_embedding = qc.Embed(2 * config.n_pos - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.n_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hiddens, attention_mask=None, head_mask=None, enc_hiddens=None, 
encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): mixed_query_layer = self.query(hiddens) is_cross_attention = enc_hiddens is not None if is_cross_attention and past_key_value is not None: # reuse k,v, crosses key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(enc_hiddens)) value_layer = self.transpose_for_scores(self.value(enc_hiddens)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hiddens)) value_layer = self.transpose_for_scores(self.value(hiddens)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hiddens)) value_layer = self.transpose_for_scores(self.value(hiddens)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.pos_type == "relative_key" or self.pos_type == "relative_key_query": seq_length = hiddens.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hiddens.device).view( -1, 1 ) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hiddens.device).view( 1, -1 ) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.n_pos - 1) positional_embedding = positional_embedding.to( dtype=query_layer.dtype ) # fp16 compatibility if self.pos_type == "relative_key": relative_position_scores = torch.einsum( "bhld,lrd->bhlr", query_layer, positional_embedding ) attention_scores = attention_scores + relative_position_scores elif self.pos_type == "relative_key_query": relative_position_scores_query = torch.einsum( "bhld,lrd->bhlr", query_layer, positional_embedding ) 
relative_position_scores_key = torch.einsum( "bhrd,lrd->bhlr", key_layer, positional_embedding ) attention_scores = ( attention_scores + relative_position_scores_query + relative_position_scores_key ) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in MegatronBertModel forward() function) attention_scores = attention_scores + attention_mask attention_probs = F.softmax(attention_scores, dim=-1) attention_probs = self.drop(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Based transformers.models.bert.modeling_bert.BertSelfOutput. Moved LayerNorm to MegatronBertAttention below. class MegatronBertSelfOutput(qc.Module): def __init__(self, config): super().__init__() self.dense = qc.Linear(config.d_model, config.d_model) self.drop = qc.Dropout(config.drop) def forward(self, hiddens, residual): hiddens = self.dense(hiddens) hiddens = self.drop(hiddens) return residual + hiddens # Based transformers.models.bert.modeling_bert.BertAttention. Added LayerNorm. 
class Attention(qc.Module): def __init__(self, config): super().__init__() self.ln = qc.LayerNorm(config.d_model, eps=config.eps) self.self = MegatronBertSelfAttention(config) self.output = MegatronBertSelfOutput(config) def forward( self, hiddens, attention_mask=None, head_mask=None, enc_hiddens=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): ln_outputs = self.ln(hiddens) self_outputs = self.self( ln_outputs, attention_mask, head_mask, enc_hiddens, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hiddens) outputs = (attention_output,) + self_outputs[1:] # add attns if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->MegatronBert class MegatronBertIntermediate(qc.Module): def __init__(self, cfg): super().__init__() self.dense = qc.Linear(cfg.d_model, cfg.d_ff) self.act = qu.activation(cfg.act) def forward(self, x): y = self.dense(x) y = self.act(y) return y # Based on transformers.models.bert.modeling_bert.BertOutput. Moved LayerNorm to MegatronBertLayer below. class MegatronBertOutput(qc.Module): def __init__(self, config): super().__init__() self.dense = qc.Linear(config.d_ff, config.d_model) self.drop = qc.Dropout(config.drop) def forward(self, hiddens, input_tensor): hiddens = self.dense(hiddens) hiddens = self.drop(hiddens) return input_tensor + hiddens # Based on transformers.models.bert.modeling_bert.BertLayer. Added LayerNorm. 
class Layer(qc.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = Attention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise TypeError( f"{self} should be used as a decoder model if cross attention is added" ) self.crossattention = Attention(config) self.ln = qc.LayerNorm(config.d_model, eps=config.eps) self.intermediate = MegatronBertIntermediate(config) self.output = MegatronBertOutput(config) def forward( self, hiddens, attention_mask=None, head_mask=None, enc_hiddens=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hiddens, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attns if we output attention weights cross_attn_present_key_value = None if self.is_decoder and enc_hiddens is not None: if not hasattr(self, "crossattention"): raise AttributeError( f"If `enc_hiddens` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, enc_hiddens, encoder_attention_mask, 
cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = ( outputs + cross_attention_outputs[1:-1] ) # add cross attns if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output, ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): ln_output = self.ln(attention_output) intermediate_output = self.intermediate(ln_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class Encoder(qc.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([Layer(config) for _ in range(config.n_lays)]) self.ln = qc.LayerNorm(config.d_model, eps=config.eps) self.gradient_checkpointing = False def forward( self, hiddens, attention_mask=None, head_mask=None, enc_hiddens=None, encoder_attention_mask=None, caches=None, y_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if y_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hiddens,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = caches[i] if caches is not None else None if self.gradient_checkpointing and self.training: if y_cache: log.warning( "`y_cache=True` is incompatible 
with gradient checkpointing. Setting `y_cache=False`..." ) y_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hiddens, attention_mask, layer_head_mask, enc_hiddens, encoder_attention_mask, ) else: layer_outputs = layer_module( hiddens, attention_mask, layer_head_mask, enc_hiddens, encoder_attention_mask, past_key_value, output_attentions, ) # Because we moved the layer-norm at the end of the hidden layer, we have non-normali- # zed data here. If that's really needed, we must apply LN to match Transformer's BERT. hiddens = layer_outputs[0] if y_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) # Finalize the hidden states. 
hiddens = self.ln(hiddens) if output_hidden_states: all_hidden_states = all_hidden_states + (hiddens,) if not return_dict: return tuple( v for v in [ hiddens, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return qo.CachesCrosses( y=hiddens, caches=next_decoder_cache, hiddens=all_hidden_states, attns=all_self_attentions, crosses=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->MegatronBert class MegatronBertOnlyNSPHead(qc.Module): def __init__(self, config): super().__init__() self.seq_relationship = qc.Linear(config.d_model, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score # Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->MegatronBert class MegatronBertPreTrainingHeads(qc.Module): def __init__(self, config): super().__init__() self.predictions = Predictor(config) self.seq_relationship = qc.Linear(config.d_model, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class Model(PreTrained): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = MegatronBertEmbeddings(config) self.encoder = Encoder(config) self.pool = Pool(config) if add_pooling_layer else None def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, enc_hiddens=None, encoder_attention_mask=None, caches=None, y_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None 
else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: y_cache = y_cache if y_cache is not None else self.config.y_cache else: y_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = caches[0][0].shape[2] if caches is not None else 0 if attention_mask is None: attention_mask = torch.ones( ((batch_size, seq_length + past_key_values_length)), device=device ) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_shape, device ) if self.config.is_decoder and enc_hiddens is not None: encoder_batch_size, encoder_sequence_length, _ = enc_hiddens.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.n_lays) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, enc_hiddens=enc_hiddens, 
encoder_attention_mask=encoder_extended_attention_mask, caches=caches, y_cache=y_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pool(sequence_output) if self.pool is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return qo.BaseWithPoolingAndCrossAttentions( y=sequence_output, pools=pooled_output, caches=encoder_outputs.caches, hiddens=encoder_outputs.hiddens, attns=encoder_outputs.attns, crosses=encoder_outputs.crosses, ) class ForPreTraining(PreTrained): def __init__(self, config, add_binary_head=True): super().__init__(config) self.bert = Model(config) self.cls = MegatronBertPreTrainingHeads(config) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, next_sentence_label=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output, pooled_output = outputs[:2] prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) total_loss = None if labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct( prediction_scores.view(-1, self.config.s_vocab), labels.view(-1) ) next_sentence_loss = loss_fct( seq_relationship_score.view(-1, 2), next_sentence_label.view(-1) ) total_loss = masked_lm_loss + next_sentence_loss if not return_dict: output = (prediction_scores, seq_relationship_score) + outputs[2:] return ((total_loss,) + output) if 
total_loss is not None else output return qo.LossSeq( loss=total_loss, logits=prediction_scores, orders=seq_relationship_score, hiddens=outputs.hiddens, attns=outputs.attns, ) class ForCausal(PreTrained): def __init__(self, config): super().__init__(config) if not config.is_decoder: log.warning("If you want to use `ForCausal` as a standalone, add `is_decoder=True.`") self.bert = Model(config, add_pooling_layer=False) self.cls = Predictor(config) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, enc_hiddens=None, encoder_attention_mask=None, labels=None, caches=None, y_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: y_cache = False outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, enc_hiddens=enc_hiddens, encoder_attention_mask=encoder_attention_mask, caches=caches, y_cache=y_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) lm_loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct( shifted_prediction_scores.view(-1, self.config.s_vocab), labels.view(-1) ) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, caches=outputs.caches, hiddens=outputs.hiddens, attns=outputs.attns, crosses=outputs.crosses, ) class 
ForMasked(PreTrained): def __init__(self, **kw): super().__init__(**kw) self.get_cfg(kw) self.model = Model(add_pool=False, **kw) self.proj = Predictor(**kw) forward = qf.forward_masked class MegatronBertForNextPrediction(PreTrained): def __init__(self, config): super().__init__(config) self.bert = Model(config) self.cls = MegatronBertOnlyNSPHead(config) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kw, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] seq_relationship_scores = self.cls(pooled_output) next_sentence_loss = None if labels is not None: loss_fct = CrossEntropyLoss() next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1)) if not return_dict: output = (seq_relationship_scores,) + outputs[2:] return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output return NextSentencePredictorOutput( loss=next_sentence_loss, logits=seq_relationship_scores, hiddens=outputs.hiddens, attns=outputs.attns, ) class ForChoice(PreTrained): def __init__(self, config): super().__init__(config) self.bert = Model(config) self.drop = qc.Dropout(config.drop) self.classifier = qc.Linear(config.d_model, 1) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = 
input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = ( attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None ) token_type_ids = ( token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None ) position_ids = ( position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None ) inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.drop(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return qo.WithLoss( loss=loss, logits=reshaped_logits, hiddens=outputs.hiddens, attns=outputs.attns, ) class ForSeqClass(PreTrained): def __init__(self, **kw): super().__init__(**kw) self.get_cfg(kw) self.model = Model(**kw) self.proj = Classifier(**kw) forward = qf.forward_seq class ForTokClass(PreTrained): def __init__(self, **kw): super().__init__(**kw) self.get_cfg(kw) self.model = Model(add_pool=False, **kw) self.proj = Classifier(**kw) forward = qf.forward_tok class ForQA(PreTrained): def __init__(self, **kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.model = Model(add_pool=False, **kw) self.proj = qc.Linear(cfg.d_model, cfg.n_labels, **kw) forward = qf.forward_qa
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,449
quantapix/qnarre
refs/heads/main
/qnarre/models/fnet.py
# Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import torch import torch.utils.checkpoint from functools import partial from torch import nn from torch.nn import functional as F from transformers.utils import logging from .. import core as qc from ..core import utils as qu from ..core import forward as qf from ..core import output as qo from ..core import attention as qa from ..core.embed import Embed from ..core.mlp import Classifier, MLP, Predictor, Pool from ..prep.config.fnet import PreTrained log = logging.get_logger(__name__) from torch.nn import CrossEntropyLoss from ...utils import is_scipy_available if is_scipy_available(): from scipy import linalg from ...pytorch_utils import apply_chunking_to_forward LIST = ["google/fnet-base", "google/fnet-large"] # Adapted from https://github.com/google-research/google-research/blob/master/f_net/fourier.py def _two_dim_matmul(x, matrix_dim_one, matrix_dim_two): seq_length = x.shape[1] matrix_dim_one = matrix_dim_one[:seq_length, :seq_length] x = x.type(torch.complex64) return torch.einsum("bij,jk,ni->bnk", x, matrix_dim_two, matrix_dim_one) # # Adapted from https://github.com/google-research/google-research/blob/master/f_net/fourier.py def two_dim_matmul(x, matrix_dim_one, matrix_dim_two): return _two_dim_matmul(x, matrix_dim_one, matrix_dim_two) # Adapted from 
https://github.com/google-research/google-research/blob/master/f_net/fourier.py def fftn(x): out = x for axis in reversed(range(x.ndim)[1:]): # We don't need to apply FFT to last axis out = torch.fft.fft(out, axis=axis) return out class FNetEmbeddings(qc.Module): def __init__(self, config): super().__init__() self.word_embeddings = qc.Embed(config.s_vocab, config.d_model, padding_idx=config.PAD) self.position_embeddings = qc.Embed(config.n_pos, config.d_model) self.token_type_embeddings = qc.Embed(config.n_typ, config.d_model) self.norm = qc.LayerNorm(config.d_model, eps=config.eps) self.projection = qc.Linear(config.d_model, config.d_model) self.drop = qc.Dropout(config.drop) self.register_buffer("position_ids", torch.arange(config.n_pos).expand((1, -1))) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False, ) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand( input_shape[0], seq_length ) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros( input_shape, dtype=torch.long, device=self.position_ids.device ) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.norm(embeddings) embeddings = self.projection(embeddings) embeddings = self.drop(embeddings) return embeddings class 
FNetBasicFourierTransform(qc.Module): def __init__(self, config): super().__init__() self._init_fourier_transform(config) def _init_fourier_transform(self, config): if not config.use_tpu_fourier_optimizations: self.fourier_transform = partial(torch.fft.fftn, dim=(1, 2)) elif config.n_pos <= 4096: if is_scipy_available(): self.register_buffer( "dft_mat_hidden", torch.tensor(linalg.dft(config.d_model), dtype=torch.complex64), ) self.register_buffer( "dft_mat_seq", torch.tensor(linalg.dft(config.tpu_short_seq_length), dtype=torch.complex64), ) self.fourier_transform = partial( two_dim_matmul, matrix_dim_one=self.dft_mat_seq, matrix_dim_two=self.dft_mat_hidden, ) else: self.fourier_transform = fftn else: self.fourier_transform = fftn def forward(self, hiddens): outputs = self.fourier_transform(hiddens).real return (outputs,) class FNetBasicOutput(qc.Module): def __init__(self, config): super().__init__() self.norm = qc.LayerNorm(config.d_model, eps=config.eps) def forward(self, hiddens, input_tensor): hiddens = self.norm(input_tensor + hiddens) return hiddens class FNetFourierTransform(qc.Module): def __init__(self, config): super().__init__() self.self = FNetBasicFourierTransform(config) self.output = FNetBasicOutput(config) def forward(self, hiddens): self_outputs = self.self(hiddens) fourier_output = self.output(self_outputs[0], hiddens) outputs = (fourier_output,) return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->FNet class FNetIntermediate(qc.Module): def __init__(self, cfg): super().__init__() self.dense = qc.Linear(cfg.d_model, cfg.d_ff) self.act = qu.activation(cfg.act) def forward(self, x): y = self.dense(x) y = self.act(y) return y # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->FNet class FNetOutput(qc.Module): def __init__(self, config): super().__init__() self.dense = qc.Linear(config.d_ff, config.d_model) self.norm = qc.LayerNorm(config.d_model, eps=config.eps) self.drop = 
qc.Dropout(config.drop) def forward(self, hiddens, input_tensor): hiddens = self.dense(hiddens) hiddens = self.drop(hiddens) hiddens = self.norm(hiddens + input_tensor) return hiddens class FNetLayer(qc.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 # The dimension which has the sequence length self.fourier = FNetFourierTransform(config) self.intermediate = FNetIntermediate(config) self.output = FNetOutput(config) def forward(self, hiddens): self_fourier_outputs = self.fourier(hiddens) fourier_output = self_fourier_outputs[0] layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, fourier_output ) outputs = (layer_output,) return outputs def feed_forward_chunk(self, fourier_output): intermediate_output = self.intermediate(fourier_output) layer_output = self.output(intermediate_output, fourier_output) return layer_output class FNetEncoder(qc.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([FNetLayer(config) for _ in range(config.n_lays)]) self.gradient_checkpointing = False def forward(self, hiddens, output_hidden_states=False, return_dict=True): all_hidden_states = () if output_hidden_states else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hiddens,) if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hiddens ) else: layer_outputs = layer_module(hiddens) hiddens = layer_outputs[0] if output_hidden_states: all_hidden_states = all_hidden_states + (hiddens,) if not return_dict: return tuple(v for v in [hiddens, all_hidden_states] if v is not None) return qo.Base(y=hiddens, hiddens=all_hidden_states) # 
Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->FNet class FNetPreTrainingHeads(qc.Module): def __init__(self, config): super().__init__() self.predictions = Predictor(config) self.seq_relationship = qc.Linear(config.d_model, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class Model(PreTrained): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = FNetEmbeddings(config) self.encoder = FNetEncoder(config) self.pool = Pool(config) if add_pooling_layer else None def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, output_hidden_states=None, return_dict=None, ): output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() batch_size, seq_length = input_shape elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape else: raise ValueError("You have to specify either input_ids or inputs_embeds") if ( self.config.use_tpu_fourier_optimizations and seq_length <= 4096 and self.config.tpu_short_seq_length != seq_length ): raise ValueError( "The `tpu_short_seq_length` in FNetConfig should be set equal to the sequence length being passed to the model when using TPU optimizations." 
) device = input_ids.device if input_ids is not None else inputs_embeds.device if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand( batch_size, seq_length ) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pools = self.pool(sequence_output) if self.pool is not None else None if not return_dict: return (sequence_output, pools) + encoder_outputs[1:] return qo.BaseWithPooling( y=sequence_output, pools=pools, hiddens=encoder_outputs.hiddens, ) class FNetForPreTraining(PreTrained): def __init__(self, config): super().__init__(config) self.fnet = Model(config) self.cls = FNetPreTrainingHeads(config) self.post_init() def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, labels=None, next_sentence_label=None, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.fnet( input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output, pooled_output = outputs[:2] prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) total_loss = None if labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct( prediction_scores.view(-1, self.config.s_vocab), labels.view(-1) ) next_sentence_loss = loss_fct( 
seq_relationship_score.view(-1, 2), next_sentence_label.view(-1) ) total_loss = masked_lm_loss + next_sentence_loss if not return_dict: output = (prediction_scores, seq_relationship_score) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return qo.LossSeq( loss=total_loss, logits=prediction_scores, orders=seq_relationship_score, hiddens=outputs.hiddens, ) class ForMasked(PreTrained): def __init__(self, **kw): super().__init__(**kw) self.get_cfg(kw) self.model = Model(**kw) self.proj = Predictor(**kw) forward = qf.forward_masked class FNetForNextPrediction(PreTrained): def __init__(self, config): super().__init__(config) self.fnet = Model(config) self.cls = qc.Linear(config.d_model, 2) self.post_init() def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, labels=None, output_hidden_states=None, return_dict=None, **kw, ): if "next_sentence_label" in kw: labels = kw.pop("next_sentence_label") return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.fnet( input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] seq_relationship_scores = self.cls(pooled_output) next_sentence_loss = None if labels is not None: loss_fct = CrossEntropyLoss() next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1)) if not return_dict: output = (seq_relationship_scores,) + outputs[2:] return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output return NextSentencePredictorOutput( loss=next_sentence_loss, logits=seq_relationship_scores, hiddens=outputs.hiddens, ) class ForChoice(PreTrained): def __init__(self, config): super().__init__(config) self.fnet = Model(config) self.drop = qc.Dropout(config.drop) self.classifier = qc.Linear(config.d_model, 1) self.post_init() def forward( self, 
input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, labels=None, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None token_type_ids = ( token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None ) position_ids = ( position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None ) inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.fnet( input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.drop(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return qo.WithLoss(loss=loss, logits=reshaped_logits, hiddens=outputs.hiddens) class ForSeqClass(PreTrained): def __init__(self, **kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.model = Model(**kw) self.proj = Classifier(**kw) forward = qf.forward_seq class ForTokClass(PreTrained): def __init__(self, **kw): super().__init__(**kw) self.get_cfg(kw) self.model = Model(**kw) self.proj = Classifier(**kw) forward = qf.forward_tok class ForQA(PreTrained): def __init__(self, **kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.model = Model(**kw) self.proj = qc.Linear(cfg.d_model, cfg.n_labels, **kw) forward = qf.forward_qa
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,450
quantapix/qnarre
refs/heads/main
/qnarre/core/flash.py
import math
import torch
import triton
import triton.language as tl
import flash_attn_cuda
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from flash_attn import flash_attn_triton
from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
from flash_attn.bert_padding import unpad_input, pad_input


class FlashAttention(nn.Module):
    """Scaled-dot-product attention backed by the fused flash-attn CUDA kernel.

    Consumes a packed qkv tensor; fp16/bf16 CUDA tensors are required
    (asserted in forward). Dropout is applied only in training mode.
    """

    def __init__(self, softmax_scale=None, attention_dropout=0.0):
        super().__init__()
        # None lets the kernel apply its own default scaling.
        self.softmax_scale = softmax_scale
        self.dropout_p = attention_dropout

    def forward(
        self,
        qkv,
        key_padding_mask=None,
        causal=False,
        cu_seqlens=None,
        max_s=None,
        need_weights=False,
    ):
        # The fused kernel cannot return attention weights.
        assert not need_weights
        assert qkv.dtype in [torch.float16, torch.bfloat16]
        assert qkv.is_cuda
        if cu_seqlens is None:
            # Padded layout: qkv is (batch, seqlen, 3, h, d), per the rearrange
            # patterns below.
            batch_size = qkv.shape[0]
            seqlen = qkv.shape[1]
            if key_padding_mask is None:
                # No padding mask: flatten batch to the kernel's "unpadded"
                # layout with uniform cumulative sequence lengths.
                qkv = rearrange(qkv, "b s ... -> (b s) ...")
                max_s = seqlen
                cu_seqlens = torch.arange(
                    0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32, device=qkv.device
                )
                output = flash_attn_unpadded_qkvpacked_func(
                    qkv,
                    cu_seqlens,
                    max_s,
                    self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale,
                    causal=causal,
                )
                output = rearrange(output, "(b s) ... -> b s ...", b=batch_size)
            else:
                # Padding mask given: strip pad tokens, run the kernel over the
                # packed non-pad tokens, then scatter back to padded layout.
                nheads = qkv.shape[-2]
                x = rearrange(qkv, "b s three h d -> b s (three h d)")
                x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
                x_unpad = rearrange(x_unpad, "nnz (three h d) -> nnz three h d", three=3, h=nheads)
                output_unpad = flash_attn_unpadded_qkvpacked_func(
                    x_unpad,
                    cu_seqlens,
                    max_s,
                    self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale,
                    causal=causal,
                )
                output = rearrange(
                    pad_input(
                        rearrange(output_unpad, "nnz h d -> nnz (h d)"), indices, batch_size, seqlen
                    ),
                    "b s (h d) -> b s h d",
                    h=nheads,
                )
        else:
            # Caller already supplies the unpadded layout plus its metadata.
            assert max_s is not None
            output = flash_attn_unpadded_qkvpacked_func(
                qkv,
                cu_seqlens,
                max_s,
                self.dropout_p if self.training else 0.0,
                softmax_scale=self.softmax_scale,
                causal=causal,
            )
        # None mirrors nn.MultiheadAttention's (output, weights) return shape.
        return output, None


class FlashMHA(nn.Module):
    """Drop-in MHA block: fused QKV projection + FlashAttention + out projection."""

    def __init__(
        self,
        embed_dim,
        num_heads,
        bias=True,
        batch_first=True,
        attention_dropout=0.0,
        causal=False,
        device=None,
        dtype=None,
    ):
        # Only (batch, seq, feature) layout is supported.
        assert batch_first
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.embed_dim = embed_dim
        self.causal = causal
        self.num_heads = num_heads
        assert self.embed_dim % num_heads == 0, "self.kdim must be divisible by num_heads"
        self.head_dim = self.embed_dim // num_heads
        # Kernel-imposed constraint on head size.
        assert (
            self.head_dim % 8 == 0 and self.head_dim <= 128
        ), "Only support head_dim <= 128 and divisible by 8"
        # Single projection producing q, k and v at once.
        self.Wqkv = nn.Linear(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
        self.inner_attn = FlashAttention(attention_dropout=attention_dropout)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, **factory_kwargs)

    def forward(self, x, key_padding_mask=None, need_weights=False):
        # Project once, then split into (b, s, 3, h, d) for the packed kernel.
        qkv = self.Wqkv(x)
        qkv = rearrange(qkv, "b s (three h d) -> b s three h d", three=3, h=self.num_heads)
        context, attn_weights = self.inner_attn(
            qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=self.causal
        )
        # Merge heads back and apply the output projection.
        return self.out_proj(rearrange(context, "b s h d -> b s (h d)")), attn_weights


# Disabling autotune for now, set
# num_warps=4 if headdim=64 and num_warps=8 if headdim=128
# @triton.autotune(
#     configs=[
#         triton.Config({"BLOCK_M": 128, "BLOCK_N": 128}, num_warps=4, num_stages=1),
#         # This config has a race condition when EVEN_M == False, disabling it for now.
#         # triton.Config({"BLOCK_M": 64, "BLOCK_N": 64}, num_warps=4, num_stages=1),
#     ],
#     key=['CACHE_KEY_SEQLEN_Q', 'CACHE_KEY_SEQLEN_K', 'BIAS_TYPE', 'IS_CAUSAL', 'BLOCK_HEADDIM']
# )
# The EVEN_* flags are derived at launch time so the kernel can specialize away
# boundary masking when sequence lengths / head dim divide the block sizes.
@triton.heuristics(
    {
        "EVEN_M": lambda args: args["seqlen_q"] % args["BLOCK_M"] == 0,
        "EVEN_N": lambda args: args["seqlen_k"] % args["BLOCK_N"] == 0,
        "EVEN_HEADDIM": lambda args: args["headdim"] == args["BLOCK_HEADDIM"],
    }
)
@triton.jit
def _fwd_kernel(
    # Forward flash-attention kernel: one program instance computes a BLOCK_M
    # slab of output rows for one (batch, head) pair, streaming over K/V in
    # BLOCK_N chunks with a running max / log-sum-exp softmax (see the m_ij /
    # lse_i updates below). Lse receives the per-row log-sum-exp.
    Q,
    K,
    V,
    Bias,
    Out,
    Lse,
    TMP,  # NOTE: TMP is a scratchpad buffer to workaround a compiler bug
    softmax_scale,
    stride_qb,
    stride_qh,
    stride_qm,
    stride_kb,
    stride_kh,
    stride_kn,
    stride_vb,
    stride_vh,
    stride_vn,
    stride_bb,
    stride_bh,
    stride_bm,
    stride_ob,
    stride_oh,
    stride_om,
    nheads,
    seqlen_q,
    seqlen_k,
    seqlen_q_rounded,
    headdim,
    CACHE_KEY_SEQLEN_Q,
    CACHE_KEY_SEQLEN_K,
    BIAS_TYPE: tl.constexpr,
    IS_CAUSAL: tl.constexpr,
    BLOCK_HEADDIM: tl.constexpr,
    EVEN_M: tl.constexpr,
    EVEN_N: tl.constexpr,
    EVEN_HEADDIM: tl.constexpr,
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
):
    # Grid axis 0: query-row block; grid axis 1: flattened batch*head index.
    start_m = tl.program_id(0)
    off_hb = tl.program_id(1)
    off_b = off_hb // nheads
    off_h = off_hb % nheads
    # off_b = tl.program_id(1)
    # off_h = tl.program_id(2)
    # off_hb = off_b * nheads + off_h
    # initialize offsets
    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = tl.arange(0, BLOCK_N)
    offs_d = tl.arange(0, BLOCK_HEADDIM)
    # Initialize pointers to Q, K, V
    # Adding parenthesis around indexing might use int32 math instead of int64 math?
    # https://github.com/openai/triton/issues/741
    # I'm seeing a tiny bit of difference (5-7us)
    q_ptrs = (
        Q + off_b * stride_qb + off_h * stride_qh + (offs_m[:, None] * stride_qm + offs_d[None, :])
    )
    k_ptrs = (
        K + off_b * stride_kb + off_h * stride_kh + (offs_n[:, None] * stride_kn + offs_d[None, :])
    )
    v_ptrs = (
        V + off_b * stride_vb + off_h * stride_vh + (offs_n[:, None] * stride_vn + offs_d[None, :])
    )
    # Bias is either one value per key ("vector") or per (query, key) pair
    # ("matrix"); "none" skips bias entirely.
    if BIAS_TYPE == "vector":
        b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + offs_n
    elif BIAS_TYPE == "matrix":
        b_ptrs = (
            Bias
            + off_b * stride_bb
            + off_h * stride_bh
            + (offs_m[:, None] * stride_bm + offs_n[None, :])
        )
    # initialize pointer to m and l
    t_ptrs = TMP + off_hb * seqlen_q_rounded + offs_m
    # Running log-sum-exp and running max start at -inf; accumulator at zero.
    lse_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
    m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
    acc_o = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32)
    # load q: it will stay in SRAM throughout
    # [2022-10-30] TD: Triton bug - in the case of EVEN_M=True and EVEN_N=False, if we just call
    # tl.load(q_ptrs), we get the wrong output!
    if EVEN_M & EVEN_N:
        if EVEN_HEADDIM:
            q = tl.load(q_ptrs)
        else:
            q = tl.load(q_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
    else:
        if EVEN_HEADDIM:
            q = tl.load(q_ptrs, mask=offs_m[:, None] < seqlen_q, other=0.0)
        else:
            q = tl.load(
                q_ptrs, mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim), other=0.0
            )
    # loop over k, v and update accumulator
    # Causal mode only needs key blocks up to (and including) this query block.
    end_n = seqlen_k if not IS_CAUSAL else tl.minimum((start_m + 1) * BLOCK_M, seqlen_k)
    for start_n in range(0, end_n, BLOCK_N):
        start_n = tl.multiple_of(start_n, BLOCK_N)
        # -- compute qk ----
        if EVEN_N & EVEN_M:  # If we just do "if EVEN_N", there seems to be some race condition
            if EVEN_HEADDIM:
                k = tl.load(k_ptrs + start_n * stride_kn)
            else:
                k = tl.load(k_ptrs + start_n * stride_kn, mask=offs_d[None, :] < headdim, other=0.0)
        else:
            if EVEN_HEADDIM:
                k = tl.load(
                    k_ptrs + start_n * stride_kn,
                    mask=(start_n + offs_n)[:, None] < seqlen_k,
                    other=0.0,
                )
            else:
                k = tl.load(
                    k_ptrs + start_n * stride_kn,
                    mask=((start_n + offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim),
                    other=0.0,
                )
        qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
        qk += tl.dot(q, k, trans_b=True)
        # Trying to combine the two masks seem to make the result wrong
        if not EVEN_N:  # Need to mask out otherwise the softmax is wrong
            qk += tl.where((start_n + offs_n)[None, :] < seqlen_k, 0, float("-inf"))
        if IS_CAUSAL:
            qk += tl.where(offs_m[:, None] >= (start_n + offs_n)[None, :], 0, float("-inf"))
        if BIAS_TYPE != "none":
            if BIAS_TYPE == "vector":
                if EVEN_N:
                    bias = tl.load(b_ptrs + start_n).to(tl.float32)
                else:
                    bias = tl.load(
                        b_ptrs + start_n, mask=(start_n + offs_n) < seqlen_k, other=0.0
                    ).to(tl.float32)
                bias = bias[None, :]
            elif BIAS_TYPE == "matrix":
                if EVEN_M & EVEN_N:
                    bias = tl.load(b_ptrs + start_n).to(tl.float32)
                else:
                    bias = tl.load(
                        b_ptrs + start_n,
                        mask=(offs_m[:, None] < seqlen_q)
                        & ((start_n + offs_n)[None, :] < seqlen_k),
                        other=0.0,
                    ).to(tl.float32)
            # Slightly faster to multiply the softmax_scale in the tl.exp below since the compiler
            # can then fuse the mult and add into an fma instruction. But if we have bias we need to
            # to multiply with softmax_scale here.
            qk = qk * softmax_scale + bias
            m_ij = tl.maximum(tl.max(qk, 1), lse_i)
            p = tl.exp(qk - m_ij[:, None])
        else:
            # No bias: fold the scale into the exp argument (see comment above).
            m_ij = tl.maximum(tl.max(qk, 1) * softmax_scale, lse_i)
            p = tl.exp(qk * softmax_scale - m_ij[:, None])
        l_ij = tl.sum(p, 1)
        # scale acc_o
        # Rescale the partial output for the new running max.
        acc_o_scale = tl.exp(m_i - m_ij)
        # # -- update output accumulator --
        # BUG: have to store and immediately load
        tl.store(t_ptrs, acc_o_scale)
        acc_o_scale = tl.load(t_ptrs)
        acc_o = acc_o * acc_o_scale[:, None]
        # update acc_o
        if EVEN_N & EVEN_M:  # If we just do "if EVEN_N", there seems to be some race condition
            if EVEN_HEADDIM:
                v = tl.load(v_ptrs + start_n * stride_vn)
            else:
                v = tl.load(v_ptrs + start_n * stride_vn, mask=offs_d[None, :] < headdim, other=0.0)
        else:
            if EVEN_HEADDIM:
                v = tl.load(
                    v_ptrs + start_n * stride_vn,
                    mask=(start_n + offs_n)[:, None] < seqlen_k,
                    other=0.0,
                )
            else:
                v = tl.load(
                    v_ptrs + start_n * stride_vn,
                    mask=((start_n + offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim),
                    other=0.0,
                )
        p = p.to(v.dtype)
        acc_o += tl.dot(p, v)
        # -- update statistics
        m_i = m_ij
        l_i_new = tl.exp(lse_i - m_ij) + l_ij
        lse_i = m_ij + tl.log(l_i_new)
    # Final normalization: divide accumulated output by the softmax denominator
    # (expressed as exp(m_i - lse_i)).
    o_scale = tl.exp(m_i - lse_i)
    # BUG: have to store and immediately load
    tl.store(t_ptrs, o_scale)
    o_scale = tl.load(t_ptrs)
    acc_o = acc_o * o_scale[:, None]
    # rematerialize offsets to save registers
    start_m = tl.program_id(0)
    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    # write back l and m
    lse_ptrs = Lse + off_hb * seqlen_q_rounded + offs_m
    tl.store(lse_ptrs, lse_i)
    # initialize pointers to output
    offs_d = tl.arange(0, BLOCK_HEADDIM)
    out_ptrs = (
        Out
        + off_b * stride_ob
        + off_h * stride_oh
        + (offs_m[:, None] * stride_om + offs_d[None, :])
    )
    if EVEN_M:
        if EVEN_HEADDIM:
            tl.store(out_ptrs, acc_o)
        else:
            tl.store(out_ptrs, acc_o, mask=offs_d[None, :] < headdim)
    else:
        if EVEN_HEADDIM:
            tl.store(out_ptrs, acc_o, mask=offs_m[:, None] < seqlen_q)
        else:
            tl.store(
                out_ptrs, acc_o, mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim)
            )


@triton.jit
def _bwd_preprocess_do_o_dot(
    # Backward-pass preprocessing: computes Delta = rowsum(O * dO) per query
    # row for one (batch, head) pair and stores it at
    # Delta[off_hb * seqlen_q_rounded + row].
    Out,
    DO,
    Delta,
    stride_ob,
    stride_oh,
    stride_om,
    stride_dob,
    stride_doh,
    stride_dom,
    nheads,
    seqlen_q,
    seqlen_q_rounded,
    headdim,
    BLOCK_M: tl.constexpr,
    BLOCK_HEADDIM: tl.constexpr,
):
    start_m = tl.program_id(0)
    off_hb = tl.program_id(1)
    off_b = off_hb // nheads
    off_h = off_hb % nheads
    # initialize offsets
    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_d = tl.arange(0, BLOCK_HEADDIM)
    # load
    o = tl.load(
        Out + off_b * stride_ob + off_h * stride_oh + offs_m[:, None] * stride_om + offs_d[None, :],
        mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
        other=0.0,
    ).to(tl.float32)
    do = tl.load(
        DO
        + off_b * stride_dob
        + off_h * stride_doh
        + offs_m[:, None] * stride_dom
        + offs_d[None, :],
        mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
        other=0.0,
    ).to(tl.float32)
    delta = tl.sum(o * do, axis=1)
    # write-back
    tl.store(Delta + off_hb * seqlen_q_rounded + offs_m, delta)


@triton.jit
def _bwd_store_dk_dv(
    # Masked store of a dk/dv tile back to global memory (body continues past
    # this view; only the signature is visible here).
    dk_ptrs,
    dv_ptrs,
    dk,
    dv,
    offs_n,
    offs_d,
    seqlen_k,
    headdim,
    EVEN_M: tl.constexpr,
    EVEN_N: tl.constexpr,
    EVEN_HEADDIM: tl.constexpr,
):
    # [2022-11-01] TD: Same bug.
In the case of EVEN_N=True and EVEN_M=False,
    # if we just call tl.store(dv_ptrs), there's a race condition
    if EVEN_N & EVEN_M:
        if EVEN_HEADDIM:
            tl.store(dv_ptrs, dv)
            tl.store(dk_ptrs, dk)
        else:
            tl.store(dv_ptrs, dv, mask=offs_d[None, :] < headdim)
            tl.store(dk_ptrs, dk, mask=offs_d[None, :] < headdim)
    else:
        if EVEN_HEADDIM:
            tl.store(dv_ptrs, dv, mask=offs_n[:, None] < seqlen_k)
            tl.store(dk_ptrs, dk, mask=offs_n[:, None] < seqlen_k)
        else:
            tl.store(dv_ptrs, dv, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim))
            tl.store(dk_ptrs, dk, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim))


# Backward pass for one column block of keys/values: processes BLOCK_N rows of K/V
# (block index start_n) against all relevant BLOCK_M-sized row blocks of Q/DO,
# accumulating dK/dV locally and writing/atomically-adding into DQ.
# NOTE(review): the tl.debug_barrier() calls below work around documented Triton
# race conditions — do not reorder or remove statements in this kernel.
@triton.jit
def _bwd_kernel_one_col_block(
    start_n,
    Q,
    K,
    V,
    Bias,
    DO,
    DQ,
    DK,
    DV,
    LSE,
    D,
    softmax_scale,
    stride_qm,
    stride_kn,
    stride_vn,
    stride_bm,
    stride_dom,
    stride_dqm,
    stride_dkn,
    stride_dvn,
    seqlen_q,
    seqlen_k,
    headdim,
    ATOMIC_ADD: tl.constexpr,
    BIAS_TYPE: tl.constexpr,
    IS_CAUSAL: tl.constexpr,
    BLOCK_HEADDIM: tl.constexpr,
    EVEN_M: tl.constexpr,
    EVEN_N: tl.constexpr,
    EVEN_HEADDIM: tl.constexpr,
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
):
    # We need to make sure begin_m is a multiple of BLOCK_M (not BLOCK_N)
    begin_m = 0 if not IS_CAUSAL else ((start_n * BLOCK_N) // BLOCK_M) * BLOCK_M
    # initialize row/col offsets
    offs_qm = begin_m + tl.arange(0, BLOCK_M)
    offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N)
    offs_m = tl.arange(0, BLOCK_M)
    offs_d = tl.arange(0, BLOCK_HEADDIM)
    # initialize pointers to value-like data
    q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_d[None, :])
    k_ptrs = K + (offs_n[:, None] * stride_kn + offs_d[None, :])
    v_ptrs = V + (offs_n[:, None] * stride_vn + offs_d[None, :])
    do_ptrs = DO + (offs_qm[:, None] * stride_dom + offs_d[None, :])
    dq_ptrs = DQ + (offs_qm[:, None] * stride_dqm + offs_d[None, :])
    if BIAS_TYPE == "vector":
        b_ptrs = Bias + offs_n
    elif BIAS_TYPE == "matrix":
        b_ptrs = Bias + (offs_qm[:, None] * stride_bm + offs_n[None, :])
    # initialize dv and dk
    dv = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32)
    dk = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32)
    # There seems to be some problem with Triton pipelining that makes results wrong for
    # headdim=64, seqlen=(113, 255), bias_type='matrix'. In this case the for loop
    # may have zero step, and pipelining with the bias matrix could screw it up.
    # So we just exit early.
    if begin_m >= seqlen_q:
        # Still must write the (zero) dK/dV blocks before exiting.
        dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :])
        dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :])
        _bwd_store_dk_dv(
            dk_ptrs,
            dv_ptrs,
            dk,
            dv,
            offs_n,
            offs_d,
            seqlen_k,
            headdim,
            EVEN_M=EVEN_M,
            EVEN_N=EVEN_N,
            EVEN_HEADDIM=EVEN_HEADDIM,
        )
        return
    # k and v stay in SRAM throughout
    # [2022-10-30] TD: Same bug as the fwd. In the case of EVEN_N=True and EVEN_M=False,
    # if we just call tl.load(k_ptrs), we get the wrong output!
    if EVEN_N & EVEN_M:
        if EVEN_HEADDIM:
            k = tl.load(k_ptrs)
            v = tl.load(v_ptrs)
        else:
            k = tl.load(k_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
            v = tl.load(v_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
    else:
        if EVEN_HEADDIM:
            k = tl.load(k_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0)
            v = tl.load(v_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0)
        else:
            k = tl.load(
                k_ptrs, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim), other=0.0
            )
            v = tl.load(
                v_ptrs, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim), other=0.0
            )
    # loop over rows
    num_block_m = tl.cdiv(seqlen_q, BLOCK_M)
    for start_m in range(begin_m, num_block_m * BLOCK_M, BLOCK_M):
        start_m = tl.multiple_of(start_m, BLOCK_M)
        offs_m_curr = start_m + offs_m
        # load q, k, v, do on-chip
        # Same bug as below. Otherwise gives wrong result for headdim=40, seqlen=(128, 117)
        if EVEN_M & EVEN_HEADDIM:
            q = tl.load(q_ptrs)
        else:
            if EVEN_HEADDIM:
                q = tl.load(q_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0)
            else:
                q = tl.load(
                    q_ptrs,
                    mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
                    other=0.0,
                )
        # recompute p = softmax(qk, dim=-1).T
        qk = tl.dot(q, k, trans_b=True)
        # Trying to combine the two masks seem to make the result wrong
        if not EVEN_N:  # Need to mask out otherwise the softmax is wrong
            qk = tl.where(offs_n[None, :] < seqlen_k, qk, float("-inf"))
        if IS_CAUSAL:
            qk = tl.where(offs_m_curr[:, None] >= (offs_n[None, :]), qk, float("-inf"))
        if BIAS_TYPE != "none":
            tl.debug_barrier()  # Race condition otherwise
            if BIAS_TYPE == "vector":
                if EVEN_N:
                    bias = tl.load(b_ptrs).to(tl.float32)
                else:
                    bias = tl.load(b_ptrs, mask=offs_n < seqlen_k, other=0.0).to(tl.float32)
                bias = bias[None, :]
            elif BIAS_TYPE == "matrix":
                if EVEN_M & EVEN_N:
                    bias = tl.load(b_ptrs).to(tl.float32)
                else:
                    bias = tl.load(
                        b_ptrs,
                        mask=(offs_m_curr[:, None] < seqlen_q) & (offs_n[None, :] < seqlen_k),
                        other=0.0,
                    ).to(tl.float32)
            # With a bias, the scale is folded in here (see the LSE branch below).
            qk = qk * softmax_scale + bias
        # There seems to be a race condition when headdim=48/96, and dq, dk, dv are wrong.
        # Also wrong for headdim=64.
        if not (EVEN_M & EVEN_HEADDIM):
            tl.debug_barrier()
        lse_i = tl.load(LSE + offs_m_curr)
        if BIAS_TYPE == "none":
            p = tl.exp(qk * softmax_scale - lse_i[:, None])
        else:
            p = tl.exp(qk - lse_i[:, None])
        # compute dv
        # [2022-10-30] TD: A Triton bug: if EVEN_M=True and EVEN_HEADDIM=False, if we call
        # do = tl.load(do_ptrs, mask=offs_d[None, :] < headdim, other=0.0), we get wrong outputs
        # in the case of headdim=48/96, seqlen_q & seqlen_k >= 512. If headdim=40 or seqlen < 512,
        # the output is correct.
        if EVEN_M & EVEN_HEADDIM:
            do = tl.load(do_ptrs)
        else:
            # [2022-11-01] TD: Triton bug, there's a race condition if we just use m_mask and not d_mask.
            do = tl.load(
                do_ptrs,
                mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
                other=0.0,
            )
        # if EVEN_M:
        #     if EVEN_HEADDIM:
        #         do = tl.load(do_ptrs)
        #     else:
        #         do = tl.load(do_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
        # else:
        #     if EVEN_HEADDIM:
        #         do = tl.load(do_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0)
        #     else:
        #         do = tl.load(do_ptrs, mask=(offs_m_curr[:, None] < seqlen_q)
        #                      & (offs_d[None, :] < headdim), other=0.0)
        dv += tl.dot(p.to(do.dtype), do, trans_a=True)
        # compute dp = dot(v, do)
        # There seems to be a race condition when headdim=48/96, and dq, dk are wrong.
        # Also wrong for headdim=128, seqlen=(108, 256), and ATOMIC_ADD=True
        # Also wrong for headdim=64, seqlen=(1023, 1024), and ATOMIC_ADD=False
        if not (EVEN_M & EVEN_HEADDIM):
            tl.debug_barrier()
        dp = tl.dot(do, v, trans_b=True)
        # There's a race condition for headdim=48
        if not EVEN_HEADDIM:
            tl.debug_barrier()
        # compute ds = p * (dp - delta[:, None])
        # Putting the subtraction after the dp matmul (instead of before) is slightly faster
        Di = tl.load(D + offs_m_curr)
        # Converting ds to q.dtype here reduces register pressure and makes it much faster
        # for BLOCK_HEADDIM=128
        ds = (p * (dp - Di[:, None]) * softmax_scale).to(q.dtype)
        # compute dk = dot(ds.T, q)
        dk += tl.dot(ds, q, trans_a=True)
        # compute dq
        if not (
            EVEN_M & EVEN_HEADDIM
        ):  # Otherewise there's a race condition when BIAS_TYPE='matrix'
            tl.debug_barrier()
        if not ATOMIC_ADD:
            # Sequential over seqlen_k: this program owns the whole DQ row block,
            # so plain load/add/store is safe.
            if EVEN_M & EVEN_HEADDIM:  # Race condition if we just do EVEN_M
                dq = tl.load(dq_ptrs, eviction_policy="evict_last")
                dq += tl.dot(ds, k)
                tl.store(dq_ptrs, dq, eviction_policy="evict_last")
            else:
                if EVEN_HEADDIM:
                    dq = tl.load(
                        dq_ptrs,
                        mask=offs_m_curr[:, None] < seqlen_q,
                        other=0.0,
                        eviction_policy="evict_last",
                    )
                    dq += tl.dot(ds, k)
                    tl.store(
                        dq_ptrs,
                        dq,
                        mask=offs_m_curr[:, None] < seqlen_q,
                        eviction_policy="evict_last",
                    )
                else:
                    dq = tl.load(
                        dq_ptrs,
                        mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
                        other=0.0,
                        eviction_policy="evict_last",
                    )
                    dq += tl.dot(ds, k)
                    tl.store(
                        dq_ptrs,
                        dq,
                        mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
                        eviction_policy="evict_last",
                    )
        else:  # If we're parallelizing across the seqlen_k dimension
            dq = tl.dot(ds, k)
            if EVEN_M & EVEN_HEADDIM:  # Race condition if we just do EVEN_M
                tl.atomic_add(dq_ptrs, dq)
            else:
                if EVEN_HEADDIM:
                    tl.atomic_add(dq_ptrs, dq, mask=offs_m_curr[:, None] < seqlen_q)
                else:
                    tl.atomic_add(
                        dq_ptrs,
                        dq,
                        mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
                    )
        # increment pointers
        dq_ptrs += BLOCK_M * stride_dqm
        q_ptrs += BLOCK_M * stride_qm
        do_ptrs += BLOCK_M * stride_dom
        if BIAS_TYPE == "matrix":
            b_ptrs += BLOCK_M * stride_bm
    # write-back
    dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :])
    dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :])
    _bwd_store_dk_dv(
        dk_ptrs,
        dv_ptrs,
        dk,
        dv,
        offs_n,
        offs_d,
        seqlen_k,
        headdim,
        EVEN_M=EVEN_M,
        EVEN_N=EVEN_N,
        EVEN_HEADDIM=EVEN_HEADDIM,
    )


def init_to_zero(name):
    # Autotune pre-hook factory: returns a hook that zeroes the kernel argument
    # called `name` before each run (used to clear DQ, which is accumulated into).
    return lambda nargs: nargs[name].zero_()


@triton.autotune(
    configs=[
        triton.Config(
            {"BLOCK_M": 128, "BLOCK_N": 128, "SEQUENCE_PARALLEL": False},
            num_warps=8,
            num_stages=1,
            pre_hook=init_to_zero("DQ"),
        ),
        triton.Config(
            {"BLOCK_M": 128, "BLOCK_N": 128, "SEQUENCE_PARALLEL": True},
            num_warps=8,
            num_stages=1,
            pre_hook=init_to_zero("DQ"),
        ),
        # Other configs seem to give wrong results when seqlen_q % 128 != 0, disabling them for now
        # # Kernel is buggy (give wrong result) if we set BLOCK_m=128, BLOCK_n=64, num_warps=*4*
        # triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "SEQUENCE_PARALLEL": False}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')),
        # triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "SEQUENCE_PARALLEL": True}, num_warps=8, num_stages=1, pre_hook=init_to_zero('DQ')),
        # triton.Config({"BLOCK_M": 64, "BLOCK_N": 64, "SEQUENCE_PARALLEL": False}, num_warps=4, num_stages=1, pre_hook=init_to_zero('DQ')),
        # triton.Config({"BLOCK_M": 64, "BLOCK_N": 64,
"SEQUENCE_PARALLEL": True}, num_warps=4, num_stages=1, pre_hook=init_to_zero('DQ')),
    ],
    key=["CACHE_KEY_SEQLEN_Q", "CACHE_KEY_SEQLEN_K", "BIAS_TYPE", "IS_CAUSAL", "BLOCK_HEADDIM"],
)
@triton.heuristics(
    {
        # EVEN_* flags let the kernel skip bounds masks when shapes divide evenly.
        "EVEN_M": lambda args: args["seqlen_q"] % args["BLOCK_M"] == 0,
        "EVEN_N": lambda args: args["seqlen_k"] % args["BLOCK_N"] == 0,
        "EVEN_HEADDIM": lambda args: args["headdim"] == args["BLOCK_HEADDIM"],
    }
)
@triton.jit
def _bwd_kernel(
    Q,
    K,
    V,
    Bias,
    DO,
    DQ,
    DK,
    DV,
    LSE,
    D,
    softmax_scale,
    stride_qb,
    stride_qh,
    stride_qm,
    stride_kb,
    stride_kh,
    stride_kn,
    stride_vb,
    stride_vh,
    stride_vn,
    stride_bb,
    stride_bh,
    stride_bm,
    stride_dob,
    stride_doh,
    stride_dom,
    stride_dqb,
    stride_dqh,
    stride_dqm,
    stride_dkb,
    stride_dkh,
    stride_dkn,
    stride_dvb,
    stride_dvh,
    stride_dvn,
    nheads,
    seqlen_q,
    seqlen_k,
    seqlen_q_rounded,
    headdim,
    CACHE_KEY_SEQLEN_Q,
    CACHE_KEY_SEQLEN_K,
    BIAS_TYPE: tl.constexpr,
    IS_CAUSAL: tl.constexpr,
    BLOCK_HEADDIM: tl.constexpr,
    SEQUENCE_PARALLEL: tl.constexpr,
    EVEN_M: tl.constexpr,
    EVEN_N: tl.constexpr,
    EVEN_HEADDIM: tl.constexpr,
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
):
    # Top-level backward kernel: one program per (batch, head) on axis 1; either
    # loops over all key-column blocks itself, or (SEQUENCE_PARALLEL) handles a
    # single column block chosen by program_id(0), accumulating DQ atomically.
    off_hb = tl.program_id(1)
    off_b = off_hb // nheads
    off_h = off_hb % nheads
    # offset pointers for batch/head
    Q += off_b * stride_qb + off_h * stride_qh
    K += off_b * stride_kb + off_h * stride_kh
    V += off_b * stride_vb + off_h * stride_vh
    DO += off_b * stride_dob + off_h * stride_doh
    DQ += off_b * stride_dqb + off_h * stride_dqh
    DK += off_b * stride_dkb + off_h * stride_dkh
    DV += off_b * stride_dvb + off_h * stride_dvh
    if BIAS_TYPE != "none":
        Bias += off_b * stride_bb + off_h * stride_bh
    # pointer to row-wise quantities in value-like data
    D += off_hb * seqlen_q_rounded
    LSE += off_hb * seqlen_q_rounded
    if not SEQUENCE_PARALLEL:
        num_block_n = tl.cdiv(seqlen_k, BLOCK_N)
        for start_n in range(0, num_block_n):
            _bwd_kernel_one_col_block(
                start_n,
                Q,
                K,
                V,
                Bias,
                DO,
                DQ,
                DK,
                DV,
                LSE,
                D,
                softmax_scale,
                stride_qm,
                stride_kn,
                stride_vn,
                stride_bm,
                stride_dom,
                stride_dqm,
                stride_dkn,
                stride_dvn,
                seqlen_q,
                seqlen_k,
                headdim,
                ATOMIC_ADD=False,
                BIAS_TYPE=BIAS_TYPE,
                IS_CAUSAL=IS_CAUSAL,
                BLOCK_HEADDIM=BLOCK_HEADDIM,
                EVEN_M=EVEN_M,
                EVEN_N=EVEN_N,
                EVEN_HEADDIM=EVEN_HEADDIM,
                BLOCK_M=BLOCK_M,
                BLOCK_N=BLOCK_N,
            )
    else:
        start_n = tl.program_id(0)
        _bwd_kernel_one_col_block(
            start_n,
            Q,
            K,
            V,
            Bias,
            DO,
            DQ,
            DK,
            DV,
            LSE,
            D,
            softmax_scale,
            stride_qm,
            stride_kn,
            stride_vn,
            stride_bm,
            stride_dom,
            stride_dqm,
            stride_dkn,
            stride_dvn,
            seqlen_q,
            seqlen_k,
            headdim,
            ATOMIC_ADD=True,
            BIAS_TYPE=BIAS_TYPE,
            IS_CAUSAL=IS_CAUSAL,
            BLOCK_HEADDIM=BLOCK_HEADDIM,
            EVEN_M=EVEN_M,
            EVEN_N=EVEN_N,
            EVEN_HEADDIM=EVEN_HEADDIM,
            BLOCK_M=BLOCK_M,
            BLOCK_N=BLOCK_N,
        )


def _flash_attn_forward(q, k, v, bias=None, causal=False, softmax_scale=None):
    """Launch the Triton forward kernel.

    q: (batch, seqlen_q, nheads, d); k, v: (batch, seqlen_k, nheads, d), fp16/bf16 CUDA.
    bias: optional (batch, nheads, 1|seqlen_q, seqlen_k)-broadcastable tensor.
    Returns (o, lse, softmax_scale) — softmax_scale may have been defaulted here.
    """
    # shape constraints
    batch, seqlen_q, nheads, d = q.shape
    _, seqlen_k, _, _ = k.shape
    assert k.shape == (batch, seqlen_k, nheads, d)
    assert v.shape == (batch, seqlen_k, nheads, d)
    assert d <= 128, "FlashAttention only support head dimensions up to 128"
    assert q.dtype == k.dtype == v.dtype, "All tensors must have the same type"
    assert q.dtype in [torch.float16, torch.bfloat16], "Only support fp16 and bf16"
    assert q.is_cuda and k.is_cuda and v.is_cuda
    softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
    has_bias = bias is not None
    bias_type = "none"
    if has_bias:
        assert bias.dtype in [q.dtype, torch.float]
        assert bias.is_cuda
        assert bias.dim() == 4
        if bias.stride(-1) != 1:
            bias = bias.contiguous()
        if bias.shape[2:] == (1, seqlen_k):
            bias_type = "vector"
        elif bias.shape[2:] == (seqlen_q, seqlen_k):
            bias_type = "matrix"
        else:
            raise RuntimeError(
                "Last 2 dimensions of bias must be (1, seqlen_k)" " or (seqlen_q, seqlen_k)"
            )
        bias = bias.expand(batch, nheads, seqlen_q, seqlen_k)
    bias_strides = (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0)
    seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
    lse = torch.empty((batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32)
    tmp = torch.empty((batch, nheads, seqlen_q_rounded), device=q.device,
dtype=torch.float32)
    o = torch.empty_like(q)
    BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
    BLOCK = 128
    num_warps = 4 if d <= 64 else 8
    grid = lambda META: (triton.cdiv(seqlen_q, META["BLOCK_M"]), batch * nheads)
    _fwd_kernel[grid](
        q,
        k,
        v,
        bias,
        o,
        lse,
        tmp,
        softmax_scale,
        q.stride(0),
        q.stride(2),
        q.stride(1),
        k.stride(0),
        k.stride(2),
        k.stride(1),
        v.stride(0),
        v.stride(2),
        v.stride(1),
        *bias_strides,
        o.stride(0),
        o.stride(2),
        o.stride(1),
        nheads,
        seqlen_q,
        seqlen_k,
        seqlen_q_rounded,
        d,
        seqlen_q // 32,
        seqlen_k // 32,  # key for triton cache (limit number of compilations)
        # Can't use kwargs here because triton autotune expects key to be args, not kwargs
        # IS_CAUSAL=causal, BLOCK_HEADDIM=d,
        bias_type,
        causal,
        BLOCK_HEADDIM,
        BLOCK_M=BLOCK,
        BLOCK_N=BLOCK,
        num_warps=num_warps,
        num_stages=1,
    )
    return o, lse, softmax_scale  # softmax_scale could have been updated


def _flash_attn_backward(
    do, q, k, v, o, lse, dq, dk, dv, bias=None, causal=False, softmax_scale=None
):
    """Launch the Triton backward kernels, filling dq, dk, dv in place.

    Runs the do*o dot preprocess then _bwd_kernel; dq is accumulated in a
    float32 buffer and copied back (needed when SEQUENCE_PARALLEL uses atomics).
    """
    # Make sure that the last dimension is contiguous
    if do.stride(-1) != 1:
        do = do.contiguous()
    batch, seqlen_q, nheads, d = q.shape
    _, seqlen_k, _, _ = k.shape
    # assert d in {16, 32, 64, 128}
    assert d <= 128
    seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
    assert lse.shape == (batch, nheads, seqlen_q_rounded)
    assert q.stride(-1) == k.stride(-1) == v.stride(-1) == o.stride(-1) == 1
    assert dq.stride(-1) == dk.stride(-1) == dv.stride(-1) == 1
    softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
    # dq_accum = torch.zeros_like(q, dtype=torch.float32)
    # empty_like is fine: the autotune pre_hook zeroes DQ before each kernel run.
    dq_accum = torch.empty_like(q, dtype=torch.float32)
    delta = torch.empty_like(lse)
    # delta = torch.zeros_like(lse)
    BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
    grid = lambda META: (triton.cdiv(seqlen_q, META["BLOCK_M"]), batch * nheads)
    _bwd_preprocess_do_o_dot[grid](
        o,
        do,
        delta,
        o.stride(0),
        o.stride(2),
        o.stride(1),
        do.stride(0),
        do.stride(2),
        do.stride(1),
        nheads,
        seqlen_q,
        seqlen_q_rounded,
        d,
        BLOCK_M=128,
        BLOCK_HEADDIM=BLOCK_HEADDIM,
    )
    has_bias = bias is not None
    bias_type = "none"
    if has_bias:
        assert bias.dtype in [q.dtype, torch.float]
        assert bias.is_cuda
        assert bias.dim() == 4
        assert bias.stride(-1) == 1
        if bias.shape[2:] == (1, seqlen_k):
            bias_type = "vector"
        elif bias.shape[2:] == (seqlen_q, seqlen_k):
            bias_type = "matrix"
        else:
            raise RuntimeError(
                "Last 2 dimensions of bias must be (1, seqlen_k)" " or (seqlen_q, seqlen_k)"
            )
        bias = bias.expand(batch, nheads, seqlen_q, seqlen_k)
    bias_strides = (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0)
    # BLOCK_M = 128
    # BLOCK_N = 64
    # num_warps = 4
    grid = lambda META: (
        triton.cdiv(seqlen_k, META["BLOCK_N"]) if META["SEQUENCE_PARALLEL"] else 1,
        batch * nheads,
    )
    _bwd_kernel[grid](
        q,
        k,
        v,
        bias,
        do,
        dq_accum,
        dk,
        dv,
        lse,
        delta,
        softmax_scale,
        q.stride(0),
        q.stride(2),
        q.stride(1),
        k.stride(0),
        k.stride(2),
        k.stride(1),
        v.stride(0),
        v.stride(2),
        v.stride(1),
        *bias_strides,
        do.stride(0),
        do.stride(2),
        do.stride(1),
        dq_accum.stride(0),
        dq_accum.stride(2),
        dq_accum.stride(1),
        dk.stride(0),
        dk.stride(2),
        dk.stride(1),
        dv.stride(0),
        dv.stride(2),
        dv.stride(1),
        nheads,
        seqlen_q,
        seqlen_k,
        seqlen_q_rounded,
        d,
        seqlen_q // 32,
        seqlen_k // 32,  # key for triton cache (limit number of compilations)
        # Can't use kwargs here because triton autotune expects key to be args, not kwargs
        # IS_CAUSAL=causal, BLOCK_HEADDIM=d,
        bias_type,
        causal,
        BLOCK_HEADDIM,
        # SEQUENCE_PARALLEL=False,
        # BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N,
        # num_warps=num_warps,
        # num_stages=1,
    )
    dq.copy_(dq_accum)


class FlashAttnQKVPackedFunc(torch.autograd.Function):
    # Autograd wrapper for the Triton kernels with Q, K, V packed into one tensor.

    @staticmethod
    def forward(ctx, qkv, bias=None, causal=False, softmax_scale=None):
        """
        qkv: (batch, seqlen, 3, nheads, headdim)
        bias: optional, shape broadcastible to (batch, nheads, seqlen, seqlen).
            For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen).
ALiBi mask for non-causal would have shape (1, nheads, seqlen, seqlen)
        """
        # Make sure that the last dimension is contiguous
        if qkv.stride(-1) != 1:
            qkv = qkv.contiguous()
        o, lse, ctx.softmax_scale = _flash_attn_forward(
            qkv[:, :, 0],
            qkv[:, :, 1],
            qkv[:, :, 2],
            bias=bias,
            causal=causal,
            softmax_scale=softmax_scale,
        )
        ctx.save_for_backward(qkv, o, lse, bias)
        ctx.causal = causal
        return o

    @staticmethod
    def backward(ctx, do):
        # Gradients flow only to qkv; bias/causal/softmax_scale get None.
        qkv, o, lse, bias = ctx.saved_tensors
        assert not ctx.needs_input_grad[1], "FlashAttention does not support bias gradient yet"
        # Triton's autotune causes the Tensor._version to change, and so Pytorch autograd
        # does a memcpy. To avoid this we run in inference_mode, which doesn't track the version.
        with torch.inference_mode():
            dqkv = torch.empty_like(qkv)
            _flash_attn_backward(
                do,
                qkv[:, :, 0],
                qkv[:, :, 1],
                qkv[:, :, 2],
                o,
                lse,
                dqkv[:, :, 0],
                dqkv[:, :, 1],
                dqkv[:, :, 2],
                bias=bias,
                causal=ctx.causal,
                softmax_scale=ctx.softmax_scale,
            )
        return dqkv, None, None, None


flash_attn_qkvpacked_func = FlashAttnQKVPackedFunc.apply


class FlashAttnKVPackedFunc(torch.autograd.Function):
    # Autograd wrapper for the Triton kernels with K and V packed together.

    @staticmethod
    def forward(ctx, q, kv, bias=None, causal=False, softmax_scale=None):
        """
        q: (batch, seqlen_q, nheads, headdim)
        kv: (batch, seqlen_k, 2, nheads, headdim)
        bias: optional, shape broadcastible to (batch, nheads, seqlen_q, seqlen_k).
            For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
            ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
        """
        # Make sure that the last dimension is contiguous
        q, kv = [x if x.stride(-1) == 1 else x.contiguous() for x in [q, kv]]
        o, lse, ctx.softmax_scale = _flash_attn_forward(
            q, kv[:, :, 0], kv[:, :, 1], bias=bias, causal=causal, softmax_scale=softmax_scale
        )
        ctx.save_for_backward(q, kv, o, lse, bias)
        ctx.causal = causal
        return o

    @staticmethod
    def backward(ctx, do):
        # Gradients flow to q and kv; remaining inputs get None.
        q, kv, o, lse, bias = ctx.saved_tensors
        if len(ctx.needs_input_grad) >= 3:
            assert not ctx.needs_input_grad[2], "FlashAttention does not support bias gradient yet"
        # Triton's autotune causes the Tensor._version to change, and so Pytorch autograd
        # does a memcpy. To avoid this we run in inference_mode, which doesn't track the version.
        with torch.inference_mode():
            dq = torch.empty_like(q)
            dkv = torch.empty_like(kv)
            _flash_attn_backward(
                do,
                q,
                kv[:, :, 0],
                kv[:, :, 1],
                o,
                lse,
                dq,
                dkv[:, :, 0],
                dkv[:, :, 1],
                bias=bias,
                causal=ctx.causal,
                softmax_scale=ctx.softmax_scale,
            )
        return dq, dkv, None, None, None


flash_attn_kvpacked_func = FlashAttnKVPackedFunc.apply


class FlashAttnFunc(torch.autograd.Function):
    # Autograd wrapper for the Triton kernels with separate q, k, v tensors.

    @staticmethod
    def forward(ctx, q, k, v, bias=None, causal=False, softmax_scale=None):
        """
        q: (batch_size, seqlen_q, nheads, headdim)
        k, v: (batch_size, seqlen_k, nheads, headdim)
        bias: optional, shape broadcastible to (batch, nheads, seqlen_q, seqlen_k).
            For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
            ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
        """
        # Make sure that the last dimension is contiguous
        q, k, v = [x if x.stride(-1) == 1 else x.contiguous() for x in [q, k, v]]
        o, lse, ctx.softmax_scale = _flash_attn_forward(
            q, k, v, bias=bias, causal=causal, softmax_scale=softmax_scale
        )
        ctx.save_for_backward(q, k, v, o, lse, bias)
        ctx.causal = causal
        return o

    @staticmethod
    def backward(ctx, do):
        # Gradients flow to q, k, v; bias/causal/softmax_scale get None.
        q, k, v, o, lse, bias = ctx.saved_tensors
        assert not ctx.needs_input_grad[3], "FlashAttention does not support bias gradient yet"
        # Triton's autotune causes the Tensor._version to change, and so Pytorch autograd
        # does a memcpy. To avoid this we run in inference_mode, which doesn't track the version.
        with torch.inference_mode():
            dq = torch.empty_like(q)
            dk = torch.empty_like(k)
            dv = torch.empty_like(v)
            _flash_attn_backward(
                do,
                q,
                k,
                v,
                o,
                lse,
                dq,
                dk,
                dv,
                bias=bias,
                causal=ctx.causal,
                softmax_scale=ctx.softmax_scale,
            )
        return dq, dk, dv, None, None, None


flash_attn_func = FlashAttnFunc.apply


def flash_attn_unpadded_unpacked_func_triton(q, k, v, bias=None, causal=False, softmax_scale=None):
    # Thin forwarding shim.
    # NOTE(review): delegates to the `flash_attn_triton` module, which is not
    # visible here — presumably an import of this same Triton implementation;
    # verify the import at the top of the file.
    return flash_attn_triton.flash_attn_func(q, k, v, bias, causal, softmax_scale)


def _flash_attn_forward_cuda(
    q,
    k,
    v,
    out,
    cu_seqlens_q,
    cu_seqlens_k,
    max_seqlen_q,
    max_seqlen_k,
    dropout_p,
    softmax_scale,
    causal,
    return_softmax,
    num_splits=0,
    generator=None,
):
    """
    num_splits: how much to parallelize over the seqlen_q dimension. num_splits=0 means
    it will be set by an internal heuristic. We're exposing num_splits mostly for benchmarking.
    Don't change it unless you know what you're doing.
    """
    # NOTE(review): the bare `False` positional below sits between softmax_scale
    # and causal in the flash_attn_cuda.fwd signature — argument order is
    # load-bearing; confirm against the installed extension before changing.
    softmax_lse, *rest = flash_attn_cuda.fwd(
        q,
        k,
        v,
        out,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        softmax_scale,
        False,
        causal,
        return_softmax,
        num_splits,
        generator,
    )
    # if out.isnan().any() or softmax_lse.isnan().any():
    #     breakpoint()
    S_dmask = rest[0] if return_softmax else None
    return out, softmax_lse, S_dmask


def _flash_attn_backward_cuda(
    dout,
    q,
    k,
    v,
    out,
    softmax_lse,
    dq,
    dk,
    dv,
    cu_seqlens_q,
    cu_seqlens_k,
    max_seqlen_q,
    max_seqlen_k,
    dropout_p,
    softmax_scale,
    causal,
    num_splits=0,
    generator=None,
):
    """
    num_splits: whether to parallelize over the seqlen_k dimension (num_splits > 1) or
    not (num_splits = 1). num_splits=0 means it will be set by an internal heuristic.
    Any value above 1 will call the same kernel (i.e. num_splits=2 would call the same kernel
    as num_splits=3), so effectively the choices are 0, 1, and 2.
    This hyperparameter can be tuned for performance, but default value (heuristic) should work fine.
    """
    # dq, dk, dv are filled in place by the extension; softmax_d is returned.
    _, _, _, softmax_d = flash_attn_cuda.bwd(
        dout,
        q,
        k,
        v,
        out,
        softmax_lse,
        dq,
        dk,
        dv,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        softmax_scale,
        False,
        causal,
        num_splits,
        generator,
    )
    # if dk.isnan().any() or dk.isnan().any() or dv.isnan().any() or softmax_d.isnan().any():
    #     breakpoint()
    return dq, dk, dv, softmax_d


class FlashAttnQKVPackedFunc(torch.autograd.Function):
    # Autograd wrapper over the CUDA extension, QKV-packed variable-length API.
    # NOTE(review): shadows the Triton-backed class of the same name defined
    # earlier in this file — later definition wins at import time.

    @staticmethod
    def forward(
        ctx,
        qkv,
        cu_seqlens,
        max_seqlen,
        dropout_p,
        softmax_scale,
        causal,
        return_softmax,
    ):
        # Save rng_state because the backward pass will regenerate the dropout mask
        rng_state = torch.cuda.get_rng_state() if dropout_p > 0 else None
        if softmax_scale is None:
            softmax_scale = qkv.shape[-1] ** (-0.5)
        out, softmax_lse, S_dmask = _flash_attn_forward_cuda(
            qkv[:, 0],
            qkv[:, 1],
            qkv[:, 2],
            torch.empty_like(qkv[:, 0]),
            cu_seqlens,
            cu_seqlens,
            max_seqlen,
            max_seqlen,
            dropout_p,
            softmax_scale,
            causal=causal,
            return_softmax=return_softmax,
        )
        ctx.save_for_backward(qkv, out, softmax_lse, cu_seqlens, rng_state)
        ctx.dropout_p = dropout_p
        ctx.max_seqlen = max_seqlen
        ctx.softmax_scale = softmax_scale
        ctx.causal = causal
        return out if not return_softmax else (out, softmax_lse, S_dmask)

    @staticmethod
    def backward(ctx, dout, *args):
        # Restore the forward pass's RNG state so dropout masks match.
        qkv, out, softmax_lse, cu_seqlens, rng_state = ctx.saved_tensors
        if rng_state is not None:
            cur_rng_state = torch.cuda.get_rng_state()
            torch.cuda.set_rng_state(rng_state)
        dqkv = torch.empty_like(qkv)
        _flash_attn_backward_cuda(
            dout,
            qkv[:, 0],
            qkv[:, 1],
            qkv[:, 2],
            out,
            softmax_lse,
            dqkv[:, 0],
            dqkv[:, 1],
            dqkv[:, 2],
            cu_seqlens,
            cu_seqlens,
            ctx.max_seqlen,
            ctx.max_seqlen,
            ctx.dropout_p,
            ctx.softmax_scale,
            ctx.causal,
        )
        if rng_state is not None:
            torch.cuda.set_rng_state(cur_rng_state)
        return dqkv, None, None, None, None, None, None


def flash_attn_unpadded_qkvpacked_func_cuda(
    qkv,
    cu_seqlens,
    max_seqlen,
    dropout_p,
    softmax_scale=None,
    causal=False,
    return_attn_probs=False,
):
    # Public entry point for the CUDA QKV-packed variable-length attention.
    return FlashAttnQKVPackedFunc.apply(
        qkv, cu_seqlens, max_seqlen, dropout_p, softmax_scale, causal, return_attn_probs
    )


class FlashAttnKVPackedFunc(torch.autograd.Function):
    # Autograd wrapper over the CUDA extension, KV-packed variable-length API.
    # NOTE(review): shadows the earlier Triton-backed class of the same name.

    @staticmethod
    def forward(
        ctx,
        q,
        kv,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        softmax_scale,
        causal,
        return_softmax,
    ):
        # Save rng_state because the backward pass will regenerate the dropout mask
        rng_state = torch.cuda.get_rng_state() if dropout_p > 0 else None
        if softmax_scale is None:
            softmax_scale = q.shape[-1] ** (-0.5)
        out, softmax_lse, S_dmask = _flash_attn_forward_cuda(
            q,
            kv[:, 0],
            kv[:, 1],
            torch.empty_like(q),
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            dropout_p,
            softmax_scale,
            causal=causal,
            return_softmax=return_softmax,
        )
        ctx.save_for_backward(q, kv, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state)
        ctx.dropout_p = dropout_p
        ctx.max_seqlen_q = max_seqlen_q
        ctx.max_seqlen_k = max_seqlen_k
        ctx.softmax_scale = softmax_scale
        ctx.causal = causal
        return out if not return_softmax else (out, softmax_lse, S_dmask)

    @staticmethod
    def backward(ctx, dout, *args):
        # Restore the forward pass's RNG state so dropout masks match.
        (
            q,
            kv,
            out,
            softmax_lse,
            cu_seqlens_q,
            cu_seqlens_k,
            rng_state,
        ) = ctx.saved_tensors
        if rng_state is not None:
            cur_rng_state = torch.cuda.get_rng_state()
            torch.cuda.set_rng_state(rng_state)
        dq = torch.empty_like(q)
        dkv = torch.empty_like(kv)
        _flash_attn_backward_cuda(
            dout,
            q,
            kv[:, 0],
            kv[:, 1],
            out,
            softmax_lse,
            dq,
            dkv[:, 0],
            dkv[:, 1],
            cu_seqlens_q,
            cu_seqlens_k,
            ctx.max_seqlen_q,
            ctx.max_seqlen_k,
            ctx.dropout_p,
            ctx.softmax_scale,
            ctx.causal,
        )
        if rng_state is not None:
            torch.cuda.set_rng_state(cur_rng_state)
        return dq, dkv, None, None, None, None, None, None, None, None


def flash_attn_unpadded_kvpacked_func_cuda(
    q,
    kv,
    cu_seqlens_q,
    cu_seqlens_k,
    max_seqlen_q,
    max_seqlen_k,
    dropout_p,
    softmax_scale=None,
    causal=False,
    return_attn_probs=False,
):
    """dropout_p should be set to 0.0 during evaluation

    Arguments:
        q: (total_q, nheads, headdim), where total_q = total number of query tokens in the batch.
        kv: (total_k, 2, nheads, headdim), where total_k = total number of key tokens in the batch.
        cu_seqlens_q: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
           of the sequences in the batch, used to index into q.
        cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
           of the sequences in the batch, used to index into kv.
        max_seqlen_q: int. Maximum query sequence length in the batch.
        max_seqlen_k: int. Maximum key sequence length in the batch.
        dropout_p: float. Dropout probability.
        softmax_scale: float. The scaling of QK^T before applying softmax.
            Default to 1 / sqrt(headdim).
        causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
        return_attn_probs: bool. Whether to return the attention probabilities. This option is for
           testing only. The returned probabilities are not guaranteed to be correct
           (they might not have the right scaling).
    Return:
        out: (total, nheads, headdim).
        softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen).
The logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
            normalization factor).
        S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
            The output of softmax (possibly with different scaling). It also encodes the dropout
            pattern (negative means that location was dropped, nonnegative means it was kept).
    """
    return FlashAttnKVPackedFunc.apply(
        q,
        kv,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        softmax_scale,
        causal,
        return_attn_probs,
    )


class FlashAttnFunc(torch.autograd.Function):
    # Autograd wrapper over the CUDA extension with separate q, k, v tensors.
    # NOTE(review): shadows the earlier Triton-backed class of the same name.

    @staticmethod
    def forward(
        ctx,
        q,
        k,
        v,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        softmax_scale,
        causal,
        return_softmax,
    ):
        # Save rng_state because the backward pass will regenerate the dropout mask
        rng_state = torch.cuda.get_rng_state() if dropout_p > 0 else None
        if softmax_scale is None:
            softmax_scale = q.shape[-1] ** (-0.5)
        out, softmax_lse, S_dmask = _flash_attn_forward_cuda(
            q,
            k,
            v,
            torch.empty_like(q),
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            dropout_p,
            softmax_scale,
            causal=causal,
            return_softmax=return_softmax,
        )
        ctx.save_for_backward(q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state)
        ctx.dropout_p = dropout_p
        ctx.max_seqlen_q = max_seqlen_q
        ctx.max_seqlen_k = max_seqlen_k
        ctx.softmax_scale = softmax_scale
        ctx.causal = causal
        return out if not return_softmax else (out, softmax_lse, S_dmask)

    @staticmethod
    def backward(ctx, dout, *args):
        # Restore the forward pass's RNG state so dropout masks match.
        (
            q,
            k,
            v,
            out,
            softmax_lse,
            cu_seqlens_q,
            cu_seqlens_k,
            rng_state,
        ) = ctx.saved_tensors
        if rng_state is not None:
            cur_rng_state = torch.cuda.get_rng_state()
            torch.cuda.set_rng_state(rng_state)
        dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
        _flash_attn_backward_cuda(
            dout,
            q,
            k,
            v,
            out,
            softmax_lse,
            dq,
            dk,
            dv,
            cu_seqlens_q,
            cu_seqlens_k,
            ctx.max_seqlen_q,
            ctx.max_seqlen_k,
            ctx.dropout_p,
            ctx.softmax_scale,
            ctx.causal,
        )
        if rng_state is not None:
            torch.cuda.set_rng_state(cur_rng_state)
        return dq, dk, dv, None, None, None, None, None, None, None, None


def flash_attn_unpadded_func_cuda(
    q,
    k,
    v,
    cu_seqlens_q,
    cu_seqlens_k,
    max_seqlen_q,
    max_seqlen_k,
    dropout_p,
    softmax_scale=None,
    causal=False,
    return_attn_probs=False,
):
    """dropout_p should be set to 0.0 during evaluation

    Arguments:
        q: (total_q, nheads, headdim), where total_q = total number of query tokens in the batch.
        k: (total_k, nheads, headdim), where total_k = total number of key tokens in the batch.
        v: (total_k, nheads, headdim), where total_k = total number of key tokens in the batch.
        cu_seqlens_q: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
           of the sequences in the batch, used to index into q.
        cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
           of the sequences in the batch, used to index into kv.
        max_seqlen_q: int. Maximum query sequence length in the batch.
        max_seqlen_k: int. Maximum key sequence length in the batch.
        dropout_p: float. Dropout probability.
        softmax_scale: float. The scaling of QK^T before applying softmax.
            Default to 1 / sqrt(headdim).
        causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
        return_attn_probs: bool. Whether to return the attention probabilities. This option is for
           testing only. The returned probabilities are not guaranteed to be correct
           (they might not have the right scaling).
    Return:
        out: (total, nheads, headdim).
        softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen).
            The logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
            normalization factor).
        S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
            The output of softmax (possibly with different scaling). It also encodes the dropout
            pattern (negative means that location was dropped, nonnegative means it was kept).
    """
    return FlashAttnFunc.apply(
        q,
        k,
        v,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        softmax_scale,
        causal,
        return_attn_probs,
    )


class IndexFirstAxis(torch.autograd.Function):
    # Differentiable `input[indices]` along the first axis, implemented with
    # gather/scatter (measured faster than fancy indexing — see TD notes below).

    @staticmethod
    def forward(ctx, input, indices):
        ctx.save_for_backward(indices)
        assert input.ndim >= 2
        ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
        second_dim = other_shape.numel()
        # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
        # return input[indices]
        return torch.gather(
            rearrange(input, "b ... -> b (...)"), 0, repeat(indices, "z -> z d", d=second_dim)
        ).reshape(-1, *other_shape)

    @staticmethod
    def backward(ctx, grad_output):
        (indices,) = ctx.saved_tensors
        assert grad_output.ndim >= 2
        other_shape = grad_output.shape[1:]
        grad_output = rearrange(grad_output, "b ... -> b (...)")
        grad_input = torch.zeros(
            [ctx.first_axis_dim, grad_output.shape[1]],
            device=grad_output.device,
            dtype=grad_output.dtype,
        )
        # TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
        # grad_input[indices] = grad_output
        grad_input.scatter_(0, repeat(indices, "z -> z d", d=grad_output.shape[1]), grad_output)
        return grad_input.reshape(ctx.first_axis_dim, *other_shape), None


index_first_axis = IndexFirstAxis.apply


class IndexPutFirstAxis(torch.autograd.Function):
    # Differentiable `output[indices] = values` into a zero tensor of
    # first-axis length `first_axis_dim` (inverse of IndexFirstAxis).

    @staticmethod
    def forward(ctx, values, indices, first_axis_dim):
        ctx.save_for_backward(indices)
        assert indices.ndim == 1
        assert values.ndim >= 2
        output = torch.zeros(
            first_axis_dim, *values.shape[1:], device=values.device, dtype=values.dtype
        )
        # TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
        output[indices] = values
        # output.scatter_(0, repeat(indices, 'z -> z d', d=values.shape[1]), values)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        (indices,) = ctx.saved_tensors
        # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
        grad_values = grad_output[indices]
        # grad_values = torch.gather(grad_output, 0, repeat(indices, 'z -> z d', d=grad_output.shape[1]))
        return grad_values, None, None


index_put_first_axis = IndexPutFirstAxis.apply


class IndexFirstAxisResidual(torch.autograd.Function):
    # Like IndexFirstAxis, but also returns the (detached) full input so the
    # caller can keep a residual path; backward scatter-adds into that residual grad.

    @staticmethod
    def forward(ctx, input, indices):
        ctx.save_for_backward(indices)
        assert input.ndim >= 2
        ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
        second_dim = other_shape.numel()
        # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
        output = input[indices]
        # We don't want to reshape input (b ... -> b (...)) since it could change the channel_last
        # memory format to channel_first. In other words, input might not be contiguous.
        # If we don't detach, Pytorch complains about output being a view and is being modified inplace
        return output, input.detach()

    @staticmethod
    def backward(ctx, grad_output, grad_residual):
        (indices,) = ctx.saved_tensors
        assert grad_output.ndim >= 2
        other_shape = grad_output.shape[1:]
        assert grad_residual.shape[1:] == other_shape
        # Reuse grad_residual as the output buffer and add the gathered grads
        # in place — saves allocating a second full-size gradient tensor.
        grad_input = grad_residual
        # grad_input[indices] += grad_output
        indices = indices.reshape(indices.shape[0], *((1,) * (grad_output.ndim - 1)))
        indices = indices.expand_as(grad_output)
        grad_input.scatter_add_(0, indices, grad_output)
        return grad_input.reshape(ctx.first_axis_dim, *other_shape), None


index_first_axis_residual = IndexFirstAxisResidual.apply


def unpad_input(hidden_states, attention_mask):
    """Remove padding tokens from a (batch, seqlen, ...) tensor.

    attention_mask: (batch, seqlen), nonzero where tokens are real.
    Returns (packed_hidden_states, indices, cu_seqlens, max_seqlen_in_batch),
    where packed_hidden_states is (total_tokens, ...) and cu_seqlens is the
    (batch+1,) int32 cumulative-length vector the flash-attn kernels expect.
    """
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    # NOTE(review): `torch.torch.int32` works (torch.torch is torch) but is
    # presumably a typo for `torch.int32` — candidate cleanup.
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0))
    # TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
    # bool mask, then call nonzero to get the indices, then index with those. The indices is @dim
    # times larger than it needs to be, wasting memory. It's faster and more memory-efficient to
    # index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
    # so we write custom forward and backward to make it a bit faster.
    return (
        index_first_axis(rearrange(hidden_states, "b s ... -> (b s) ..."), indices),
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )


def pad_input(hidden_states, indices, batch, seqlen):
    """Inverse of unpad_input: scatter (total_tokens, ...) back to (batch, seqlen, ...),
    zero-filling the padding positions."""
    dim = hidden_states.shape[-1]
    # output = torch.zeros((batch * seqlen), dim, device=hidden_states.device, dtype=hidden_states.dtype)
    # output[indices] = hidden_states
    output = index_put_first_axis(hidden_states, indices, batch * seqlen)
    return rearrange(output, "(b s) ... -> b s ...", b=batch)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,451
quantapix/qnarre
refs/heads/main
/qnarre/base/doc/record.py
# Copyright 2019 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= # from difflib import unified_diff from .junk import Junk from .log import Logger from .reader import Reader from .exporter import Exporter from .sanitizer import sanitize from .error import ExcludeException from .header import DocFields, EmlFields, Header from .base import config, Hdr, Record, LnkProximity, LnkAudience from .nominals import compare, para_make, para_split, para_join, quoter from .header import TxtFields, ScrFields, InlFields, FwdFields, MixFields log = Logger(__name__) class Record(Record, Exporter): raw = None junk = None source = None no_date = False label = 'record' _text = None @classmethod def filterer(cls, path, ctxt, cntr, **kw): kw.update(ctxt=ctxt, cntr=cntr) rs = iter(cls.reader(Reader(path), **kw)) r = False while True: r = r or next(rs) try: src, raw = r fs = cls.fields.extract(raw, **kw) if fs: fs, txt = fs yield raw, Header(vars(fs), **kw), txt, src except ExcludeException as e: ctxt.filters.flog.append(ctxt.current, vars(fs)) cntr.incr('-') r = rs.throw(e) continue except ValueError as e2: log.warning('Failed reading record {}', e2) # assert False cntr.incr('F') r = False @classmethod def importer(cls, path, ctxt, **kw): kw.update(ctxt=ctxt, sort_mbox=True) for raw, hdr, txt, src in cls.filterer(path, **kw): if txt is None: txt = '\n'.join(ctxt.extract(hdr.record_id, 
raw, **kw)) if 'UnicodeError' not in txt: yield cls(hdr, src, txt) @classmethod def create_from(cls, src, quote, **kw): fs = cls.fields.extract(quote, **kw) if fs: fs, txt = fs return cls(Header(vars(fs), **kw), src, txt) def __init__(self, hdr, source=None, raw=None): self.hdr = hdr if source is not None: self.source = source if raw is not None: self.raw = raw def __eq__(self, other): if isinstance(other, type(self)): return self.hdr == other.hdr return NotImplemented def __repr__(self): s = self.source if s: return '{}({!r}, {!r})'.format(type(self).__name__, self.hdr, s) return '{}({!r})'.format(type(self).__name__, self.hdr) @property def name(self): return self.hdr.name @property def slug(self): return self.hdr.slug @property def audience(self): ls = (getattr(self.hdr, f, ()) for f in ('from_', 'to', 'cc', 'bcc')) ns = sorted(set(e for s in ls if s is not None for e in s)) return ', '.join(ns) @property def zero_secs(self): return self.hdr.name, def text(self, ctxt=None, **_): if self._text is None: r = self.raw if r is None: self._text = para_join(ctxt.texts.get(self.name, ())) else: if self.junk: r = sanitize(r) self.raw = self.junk.dejunk_text(r) self.junk = False return self.raw elif self.raw is not None: del self.raw return self._text def topic(self, ctxt=None, **_): if self._topic is None: t = ctxt.topics.resolve_all(self.name, self.subject(ctxt), self.audience, self.hdr.topic) self._topic = t or config.TBD return self._topic def subject(self, ctxt=None, **_): if self._subject is None: ss = ctxt.subjects self._subject = ss.resolve_all(self.name, self.hdr.subject) return self._subject def reducer(self, **kw): ts = [] for lv, qs in quoter(self.text().splitlines()): if lv == 0: qs = '\n'.join(qs).strip() if qs: ts.append(qs) else: es = [] for m in (InlRec.create_from, FwdRec.create_from): try: yield lv, m(self.source, qs, **kw, raise_exclude=False) break except Exception as e: es.append(e) # import traceback as tb # tb.print_tb(e.__traceback__) else: f = 
'Quoting failed {}\n{!r}\n{!r}' log.warning(f, self.name, qs, es) assert False self._text = para_make('\n'.join(ts)) def expand(self, txt, ctxt): self._text = txt ctxt.texts.expand(self.hdr.name, para_split(txt)) ctxt.nominals.append(txt) def consolidate(self, others, ctxt, cntr, **kw): ds = [] old = None h = self.hdr t = self.text() for o in sorted(others, key=lambda m: m.name): oh = o.hdr hc = h.compare(oh) ot = o.text(ctxt) tc = compare(t, ot) if tc: if hc in (config.EQ, config.LT): assert not old if tc is config.GT: # o.expand(t, ctxt) pass cntr.incr('=' if hc is config.EQ else '<') return elif hc is config.GT: if tc is config.GT: # o.expand(t, ctxt) pass elif tc is config.LT: t = ot assert not old old, o.hdr = oh.name, h if self.source is not None: o.source = self.source self = o continue ds.append(oh.date) if h.name == oh.name: h.date.after(ds) if not old: return self ctxt.rename_msg(old, self.name) self.register(ctxt) cntr.incr('>') def rename(self, old, new): h = self.hdr if h.replying == old: h.replying = new q = h.quoting if q and old in q: q = tuple(new if e == old else e for e in q) if q: h.quoting = q else: del h.quoting def rectify(self, ctxt, force=False, **_): h = self.hdr r = h.replying if r: try: h.replying = r = ctxt.mids[r] except KeyError: if not force: return q = h.quoting if q and r in q: q = tuple(e for e in q if e != r) if q: h.quoting = q else: del h.quoting def register(self, ctxt, **_): h = self.hdr n, m = h.name, h.record_id if m: ctxt.mids[m] = n del h.record_id self.rectify(ctxt, force=True) s = self.subject(ctxt) if s: ctxt.subjects[s] = n t = self.topic(ctxt) if t: ctxt.topics[t] = n s = self.source if s: ctxt.sources[s] = n t = self.text() ctxt.texts.register(n, para_split(t)) ctxt.nominals.append(t) def undirected(self, links=(), **_): if links is not None: h, n = self.hdr, self.name ls = (l for l in Hdr.links if not l.directed and (not links or l in links)) for l in ls: o = getattr(h, l.label) if isinstance(o, tuple): for i in o: 
if i: yield i, n, l elif o: yield o, n, l def edger(self, links=(), directed=True, **kw): if links is not None: h, n = self.hdr, self.name ls = (l for l in Hdr.links if l.directed and (not links or l in links)) for l in ls: o = getattr(h, l.label) if isinstance(o, tuple): for i in o: if i and '|' in i: yield i, n, l elif o and '|' in o: yield o, n, l a = LnkAudience if not links or a in links: yield self.name, self.audience, a if not directed: yield from self.undirected(links, **kw) class TxtRec(Record): reader = Reader.from_tbox fields = TxtFields def edger(self, links=(), directed=True, **kw): if links is not None: yield from super().edger(links, directed, **kw) p = LnkProximity if not links or p in links: h = self.hdr yield h.name, h.date.proximity, p class MixRec(Record): reader = Reader.from_bbox fields = MixFields junk = Junk() @property def zero_secs(self): return self.hdr.date.zero_secs class ScrRec(TxtRec): no_date = True reader = Reader.from_sbox fields = ScrFields class DocRec(Record): reader = Reader.from_docs fields = DocFields class PicRec(DocRec): pass class BlogRec(DocRec): pass class StoryRec(DocRec): reader = Reader.from_main class EmlRec(Record): reader = Reader.from_mbox fields = EmlFields junk = Junk() @property def zero_secs(self): return self.hdr.date.zero_secs class InlRec(EmlRec): fields = InlFields junk = None class FwdRec(EmlRec): fields = FwdFields junk = None
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,452
quantapix/qnarre
refs/heads/main
/qnarre/base/doc/dispatch.py
# Copyright 2019 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= from .blog import Blog from .base import config from .mboxes import Mboxes from .meta import converter from .context import Context from .counter import counters from .analyzer import Analyzer from .log import Logger, start_stop_log from .resource import Resource, resource from .realm import Realm, realm_as, Agent from .edit import protect, redact, obfuscate log = Logger(__name__) class Dispatch(Resource): _res_path = config.qnar_dst + 'dispatch.qnr' _blog = 'blog' _ctxt = None @classmethod def globals(cls): return globals() @property def ctxt(self): if self._ctxt is None: self._ctxt = Context.create(self.base, self.realm) return self._ctxt def filt_mbox(self, pool=None, **kw): with resource(self.ctxt) as ctxt: kw.update(ctxt=ctxt) with start_stop_log(log, 'Filtering '): if pool: Mboxes(self.base).pool_filt(**kw) else: Mboxes(self.base).filt_mbox(**kw) def merge_mbox(self, pool=None, wdir=None, **kw): wdir = wdir or config.ARCH wdir = config.recs_src + '/' + wdir + '/' + config.MBOX with resource(self.ctxt) as ctxt: kw.update(ctxt=ctxt, wdir=wdir) with start_stop_log(log, 'Merging '): if pool: Mboxes(self.base).pool_merge(**kw) else: Mboxes(self.base).merge_mbox(**kw) def strip_mbox(self, pool=None, wdir=None, **kw): wdir = wdir or config.ARCH wdir = config.recs_src + '/' + wdir + '/' + config.MBOX 
with resource(self.ctxt) as ctxt: kw.update(ctxt=ctxt, wdir=wdir) with start_stop_log(log, 'Stripping '): if pool: Mboxes(self.base).pool_strip(**kw) else: Mboxes(self.base).strip_mbox(**kw) def import_from(self, src, **kw): with resource(self.ctxt) as ctxt: kw.update(ctxt=ctxt) with start_stop_log(log, 'Importing from ' + src): ctxt.recs.import_from(src, **kw) def protect(self, **kw): with resource(self.ctxt) as ctxt: for n, t in config.bridge_aliases[self.realm]: ctxt.add_alias(n, t) r = config.PRIV with start_stop_log(log, 'Protecting from ' + r): s = Context.create(self.base, r) ctxt.recs.copy_from(s, protect, **kw, ctxt=ctxt) def redact(self, **kw): with resource(self.ctxt) as ctxt: for n, t in config.bridge_aliases[self.realm]: ctxt.add_alias(n, t) for r in (config.PROT, config.PRIV): with start_stop_log(log, 'Redacting from ' + r): s = Context.create(self.base, r) ctxt.recs.copy_from(s, redact, **kw, ctxt=ctxt) def obfuscate(self, **kw): with resource(self.ctxt) as ctxt: for n, t in config.bridge_aliases[self.realm]: ctxt.add_alias(n, t) for r in (config.PUBL, config.PROT, config.PRIV): with start_stop_log(log, 'Obfuscating from ' + r): s = Context.create(self.base, r) ctxt.recs.copy_from(s, obfuscate, **kw, ctxt=ctxt) def check_recs(self, **kw): a = Analyzer() with resource(self.ctxt) as ctxt: kw.update(ctxt=ctxt) with start_stop_log(log, 'Checking '): ms = ctxt.recs a.check_sanity(ms.grapher(**kw, links=None), **kw) a.check_coherence(ms.grapher(**kw, links=None), **kw) def graph_recs(self, **kw): pass # with resource(self.ctxt) as ctxt: # q = config.qnar_dst # with graph(ctxt.base / (q + '/qnarre.dot'), **kw) as g: # for c in TxtChains.creator(ctxt.recs): # for n in c: # print(n) def export_all(self, kind, **kw): with resource(self.ctxt) as ctxt: kw.update(ctxt=ctxt) with start_stop_log(log, 'Exporting ' + kind): src = self.base / (config.SRC + self.realm) dst = self.base / (config.DST + self.realm) if kind is config.ORGS: from .images import Orgs 
Orgs(src, dst).export_all(**kw) elif kind is config.IMGS: from .images import Pngs Pngs(src, dst).export_all(**kw) elif kind is config.PICS: from .images import Jpgs Jpgs(src, dst).export_all(**kw) elif kind is config.MBOX: Mboxes(self.base).export_to(dst, **kw) elif kind is config.BLOG: Blog(self.base).populate(dst, **kw) convert_args = ((('converted', '.'), ('failed', 'F')), 'Converting:') def convert(self, regy, **kw): with start_stop_log(log, 'Converting ' + self.realm.upper()): ctxt = self.ctxt with counters(self.convert_args, kw) as cs: with realm_as(Realm.realms[self.realm]): for c in ctxt.contacts: Agent.convert(c, regy=regy) cs.incr('.') for _, rs in ctxt.recs.chainer(**kw, ctxt=ctxt): for r in rs: converter.convert(r, regy=regy, ctxt=ctxt)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,453
quantapix/qnarre
refs/heads/main
/qnarre/models/big_bird.py
# Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import numpy as np import torch from dataclasses import dataclass from torch import nn from torch.nn import functional as F from transformers.utils import logging from torch.utils.checkpoint import checkpoint from .. import core as qc from ..core import utils as qu from ..core import output as qo from ..core import forward as qf from ..core import attention as qa from ..core.embed import Embed from ..core.mlp import Classifier, MLP, Predictor, Pool from ..prep.config.big_bird import PreTrained from . 
import bert log = logging.get_logger(__name__) class ForCausal(PreTrained): def __init__(self, **kw): super().__init__(**kw) self.get_cfg(kw) self.model = Model(**kw) self.proj = Predictor(**kw) def forward(self, x, labels=None, **kw): cfg = self.cfg ys = self.model(x, **kw) y = self.proj(ys[0]) loss = None if labels is not None: sl = y[:, :-1, :].contiguous() ls = labels[:, 1:].contiguous() loss = nn.CrossEntropyLoss()(sl.view(-1, cfg.s_vocab), ls.view(-1)) ys = (y,) + ys[2:] + (loss,) return qo.LossCrosses(*ys) class ForChoice(PreTrained): def __init__(self, **kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.model = Model(**kw) self.drop = qc.Dropout(cfg.drop, **kw) self.proj = qc.Linear(cfg.d_model, 1, **kw) forward = bert.ForChoice.forward class ForMasked(PreTrained): def __init__(self, **kw): super().__init__(**kw) self.get_cfg(kw) self.model = Model(**kw) self.proj = Predictor(**kw) forward = qf.forward_masked class ForPreTraining(PreTrained): def __init__(self, **kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.model = Model(add_pool=True, **kw) self.proj = Predictor(**kw) self.seq = qc.Linear(cfg.d_model, 2, **kw) def forward(self, x, labels=None, ns_labels=None, **kw): cfg = self.cfg ys = self.model(x, **kw) y = self.proj(ys[0]) orders = self.seq(ys[1]) loss = None if labels is not None: f = nn.CrossEntropyLoss() loss = f(y.view(-1, cfg.s_vocab), labels.view(-1)) if loss is not None: loss = loss + f(orders.view(-1, 2), ns_labels.view(-1)) ys = (y, orders) + ys[2:] + (loss,) return bert.LossSeq(*ys) def prep_q_mask(q_lens, n): y = torch.arange(0, n).to(q_lens.device) y.unsqueeze_(0) y = y < q_lens return y class ForQA(PreTrained): def __init__(self, add_pool=False, **kw): kw.update(n_labels=2) super().__init__(**kw) cfg = self.get_cfg(kw) self.model = Model(add_pool=add_pool, **kw) self.drop = qc.Dropout(cfg.drop) self.ff = MLP(cfg.act, cfg.drop, cfg.eps, cfg) self.proj = qc.Linear(cfg.d_model, cfg.n_labels) def forward(self, x, beg=None, 
end=None, q_lens=None, typ=None, x_emb=None, **kw): n = x.size(1) if x is not None else x_emb.size(1) if q_lens is None and x is not None: q_lens = torch.argmax(x.eq(self.SEP).int(), dim=-1) + 1 q_lens.unsqueeze_(1) y_m = None if q_lens is not None: y_m = prep_q_mask(q_lens, n) if typ is None: typ = (~y_m).long() y_m[:, 0] = False y_m.unsqueeze_(2) ys = self.model(x, typ=typ, x_emb=x_emb, **kw) y = self.proj(self.ff(self.drop(ys[0]))) if y_m is not None: y = y - y_m * 1e6 b, e = y.split(1, dim=-1) b = b.squeeze(-1).contiguous() e = e.squeeze(-1).contiguous() loss = None if beg is not None and end is not None: if len(beg.size()) > 1: beg = beg.squeeze(-1) if len(end.size()) > 1: end = end.squeeze(-1) i = b.size(1) f = nn.CrossEntropyLoss(ignore_index=i) beg = beg.clamp(0, i) end = end.clamp(0, i) loss = (f(b, beg) + f(e, end)) / 2 ys = (b, e) + ys[2:] + (loss,) return qo.LossQAPools(*ys) class ForSeqClass(PreTrained): def __init__(self, **kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.model = Model(**kw) self.proj = Classifier(cfg.d_model, cfg.act, **kw) forward = qf.forward_seq # y = self.proj(ys[0][:, 0, :]) class ForTokClass(PreTrained): def __init__(self, **kw): super().__init__(**kw) self.get_cfg(kw) self.model = Model(**kw) self.proj = Classifier(**kw) forward = qf.forward_tok class Model(PreTrained): def __init__(self, add_pool=True, **kw): super().__init__(**kw) cfg = self.get_cfg(kw) self.emb = Embed(cfg.d_model, **kw) self.enc = Encoder(**kw) self.pool = Pool(**kw) if add_pool else None if cfg.attn_type != "original_full" and cfg.add_cross: self.set_attn_type("original_full") def set_attn_type(self, x): assert x in ["original_full", "block_sparse"] cfg = self.cfg if x == cfg.attn_type: return cfg.attn_type = x self.enc.set_attention_type(x) def forward( self, x, cache=None, enc_m=None, enc=None, head_m=None, mask=None, pos=None, typ=None, x_emb=None, **kw, ): cfg = self.cfg if x is not None: assert x_emb is None s, d = x.size(), x.device else: s, 
d = x_emb.size()[:-1], x_emb.device b, n = s c_len = cache[0][0].shape[2] if cache is not None else 0 if mask is None: mask = torch.ones(((b, n + c_len)), device=d) if typ is None: if hasattr(self.emb, "typ_ids"): typ = self.emb.typ_ids[:, :n].expand(b, n) else: typ = torch.zeros(s, dtype=torch.long, device=d) max_tokens_to_attend = (5 + 2 * cfg.n_rand_blocks) * cfg.block_size if cfg.attn_type == "block_sparse" and n <= max_tokens_to_attend: n = x.size(1) if x is not None else x_emb.size(1) self.set_attn_type("original_full") if cfg.attn_type == "block_sparse": (p_len, x, mask, typ, pos, x_emb) = self.pad_to_block( x, mask=mask, pos=pos, typ=typ, x_emb=x_emb ) else: p_len = 0 if cfg.attn_type == "block_sparse": (blocked_enc_m, band_m, from_m, to_m) = self.create_masks_for_block( mask, self.block_size ) mask = None else: assert cfg.attn_type == "original_full" blocked_enc_m = None band_m = None from_m = None to_m = None mask = self.get_mask(mask, s, d) if cfg.is_dec and enc is not None: if enc_m is None: enc_m = torch.ones(enc.size()[:2], device=d) enc_m = self.invert_mask(enc_m) else: enc_m = None head_m = self.get_head_m(head_m, cfg.n_lays) ys = self.emb(x, c_len=c_len, pos=pos, typ=typ, x_emb=x_emb) ys = self.enc( ys, band_m=band_m, blocked_enc_m=blocked_enc_m, cache=cache, enc_m=enc_m, enc=enc, from_m=from_m, head_m=head_m, mask=mask, to_m=to_m, ) y = ys[0] pools = self.pool(y[:, 0, :]) if self.pool is not None else None if p_len > 0: y = y[:, :-p_len] ys = (y,) + ys[1:] + (pools,) return qo.PoolsCrosses(*ys) @staticmethod def create_masks_for_block(mask, block): b, n = mask.size() assert n % block == 0 def create_band_m(from_m, to_m): to_pad = torch.cat([to_m[:, 1:-3], to_m[:, 2:-2], to_m[:, 3:-1]], dim=2) y = torch.einsum("blq,blk->blqk", from_m[:, 2:-2], to_pad) y.unsqueeze_(1) return y enc_m = mask.view(b, n // block, block) band_m = create_band_m(enc_m, enc_m) from_m = mask.view(b, 1, n, 1) to_m = mask.view(b, 1, 1, n) return enc_m, band_m, from_m, to_m def 
pad_to_block(self, x, mask, typ, pos, x_emb, PAD): cfg = self.cfg block_size = cfg.block_size shape = x.shape if x is not None else x_emb.shape b, n = shape[:2] p_len = (block_size - n % block_size) % block_size if p_len > 0: if x is not None: x = F.pad(x, (0, p_len), value=PAD) if pos is not None: pos = F.pad(pos, (0, p_len), value=PAD) if x_emb is not None: p = x_emb.new_full((b, p_len), cfg.PAD, dtype=torch.long) x_emb = torch.cat([x_emb, self.emb(p)], dim=-2) mask = F.pad(mask, (0, p_len), value=False) typ = F.pad(typ, (0, p_len), value=0) return p_len, x, mask, typ, pos, x_emb class Encoder(qc.Module): hs = qc.Hypers({"d_model", "n_heads", "n_pos", "eps"}, {"drop_attn": 0.0, "is_dec": False}) def __init__(self, ps={}, hs=[], **kw): super().__init__(ps, [self.hs] + hs, **kw) cfg = self.get_cfg(kw) self.lays = qc.Stack([Layer(seed=i, **kw) for i in range(cfg.n_lays)]) self.grad_checkpoint = False def set_attention_type(self, x): assert x in ["original_full", "block_sparse"] cfg = self.cfg if x == cfg.attn_type: return cfg.attn_type = x for lay in self.lays: lay.set_attention_type(x) def forward(self, x, cache=None, head_m=None, **kw): cfg = self.cfg y = x attns = () caches = () crosses = () hiddens = () for i, lay in enumerate(self.lays): hiddens += (y,) h = head_m[i] if head_m is not None else None c = cache[i] if cache is not None else None if self.grad_checkpoint and self.training: def create_forward(x): def forward(*xs): return x(*xs, cache=c) return forward ys = checkpoint(create_forward(lay), y, **kw, head_m=h) else: ys = lay(y, **kw, cache=c, head_m=h) y = ys[0] attns += (ys[1],) if cfg.add_cross: crosses += (ys[2],) caches += (ys[-1],) hiddens += (y,) return qo.CachesCrosses(y, attns, caches, crosses, hiddens) class Layer(qc.Module): hs = qc.Hypers({"d_model", "n_heads", "n_pos", "eps"}, {"drop_attn": 0.0, "is_dec": False}) def __init__(self, seed=None, ps={}, hs=[], **kw): super().__init__(ps, [self.hs] + hs, **kw) cfg = self.get_cfg(kw) cfg.attn_type = 
cfg.attn_type self.attn = Attention(cfg, seed=seed) self.is_dec = cfg.is_dec self.add_cross = cfg.add_cross if self.add_cross: assert self.is_dec self.cross = Attention(cfg) self.ffnet = MLP(cfg.act, cfg.drop, cfg.eps, **kw) def set_attention_type(self, x): assert x in ["original_full", "block_sparse"] cfg = self.cfg if x == cfg.attn_type: return cfg.attn_type = x self.attn.set_attention_type(x) if self.add_cross: self.cross.set_attention_type(x) def forward( self, x, mask=None, head_m=None, enc=None, enc_m=None, band_m=None, from_m=None, to_m=None, blocked_encoder_mask=None, prev_kv=None, **kw, ): self_attn_past_key_value = prev_kv[:2] if prev_kv is not None else None self_attention_outputs = self.attn( x, mask, head_m, enc=enc, enc_m=enc_m, prev_kv=self_attn_past_key_value, band_m=band_m, from_m=from_m, to_m=to_m, from_blocked_mask=blocked_encoder_mask, to_blocked_mask=blocked_encoder_mask, ) attention_output = self_attention_outputs[0] if self.is_dec: y = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: y = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_dec and enc is not None: assert hasattr(self, "crossattention") cross_attn_past_key_value = prev_kv[-2:] if prev_kv is not None else None cross_attention_outputs = self.cross( attention_output, mask, head_m, enc, enc_m, cross_attn_past_key_value, ) attention_output = cross_attention_outputs[0] y = y + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = self.ffnet( attention_output, ) y = (layer_output,) + y if self.is_dec: y = y + (present_key_value,) return y class Attention(qc.Module): hs = qc.Hypers({"d_embed", "d_model", "n_heads", "use_bias", "attn_type"}, {"drop_attn": 0.0}) def __init__(self, seed=None, ps={}, hs=[], **kw): super().__init__(ps, [self.hs] + hs, **kw) cfg = self.get_cfg(kw) cfg.seed = seed if cfg.attn_type == 
"original_full": self.attn = FullAttn(**kw) else: assert cfg.attn_type == "block_sparse" self.attn = SparseAttn(seed, **kw) m = cfg.d_model self.proj = qc.Linear(m, m, **kw) self.drop = qc.Dropout(cfg.drop, **kw) self.norm = qc.LayerNorm(m, **kw) def set_attention_type(self, x): cfg = self.cfg assert x in ["original_full", "block_sparse"] if x == cfg.attn_type: return cfg.attn_type = x if x == "original_full": a = FullAttn(**kw) else: a = SparseAttn(cfg.seed, **kw) a.query = self.attn.query a.value = self.attn.value a.key = self.attn.key self.attn = a cfg.attn_type = x if not self.training: self.attn.eval() def forward(self, x, enc=None, **kw): if self.cfg.attn_type == "original_full": ys = self.attn(x, **kw, enc=enc) else: assert enc is None ys = self.attn(x, **kw) y = self.norm(x + self.drop(self.proj(ys[0]))) y = (y,) + ys[1:] return y class FullAttn(qc.Module): hs = qc.Hypers( {"d_embed", "d_model", "n_heads", "use_bias"}, {"drop_attn": 0.0}, ) def __init__(self, ps={}, hs=[], **kw): super().__init__(ps, [self.hs] + hs, **kw) cfg = self.get_cfg(kw) m, n = cfg.d_model, cfg.n_heads assert m % n == 0 or cfg.d_embed is not None cfg.d_head = h = m // n cfg.scale = 1 / (h**0.5) self.query = qc.Linear(m, m, bias=cfg.use_bias, **kw) self.key = qc.Linear(m, m, bias=cfg.use_bias, **kw) self.value = qc.Linear(m, m, bias=cfg.use_bias, **kw) self.drop = qc.Dropout(cfg.drop_attn, **kw) split_heads = qa.split_heads def forward(self, x, cache=None, enc_m=None, enc=None, head_m=None, mask=None, **kw): cfg = self.cfg q = self.split_heads(self.query(x)) if enc is None: k = self.split_heads(self.key(x)) v = self.split_heads(self.value(x)) if cache is not None: k = torch.cat([cache[0], k], dim=2) v = torch.cat([cache[1], v], dim=2) else: mask = enc_m if cache is None: k = self.split_heads(self.key(enc)) v = self.split_heads(self.value(enc)) else: k = cache[0] v = cache[1] a = torch.matmul(q, k.transpose(-1, -2)) a.mul_(cfg.scale) if mask is not None: a += mask a = 
self.drop(F.softmax(a, dim=-1)) if head_m is not None: a *= head_m y = torch.matmul(a, v).permute(0, 2, 1, 3).contiguous() y = y.view(y.size()[:-2] + (cfg.n_heads * cfg.d_head,)) return y, a, (k, v) class SparseAttn(qc.Module): hs = qc.Hypers( {"d_embed", "d_model", "n_heads", "n_pos", "use_bias", "n_rand_blocks", "block_size"}, {"drop_attn": 0.0}, ) def __init__(self, seed=None, ps={}, hs=[], **kw): super().__init__(ps, [self.hs] + hs, **kw) cfg = self.get_cfg(kw) cfg.seed = seed m, n = cfg.d_model, cfg.n_heads assert m % n == 0 cfg.d_head = int(m / n) cfg.s_all_head = h = n * cfg.d_head self.query = qc.Linear(m, h, bias=cfg.use_bias, **kw) self.key = qc.Linear(m, h, bias=cfg.use_bias, **kw) self.value = qc.Linear(m, h, bias=cfg.use_bias, **kw) split_heads = qa.split_heads def forward(self, x, **kw): b, seqlen, _ = x.size() to_seq_length = from_seq_length = seqlen from_block_size = to_block_size = self.block_size assert from_seq_length % from_block_size == 0 assert to_seq_length % to_block_size == 0 q = self.split_heads(self.query(x)) k = self.split_heads(self.key(x)) v = self.split_heads(self.value(x)) ctx, y = self.bigbird_block_sparse_attention( q, k, v, band_m, from_m, to_m, from_blocked_mask, to_blocked_mask, d_head, from_block_size, to_block_size, b, from_seq_length, to_seq_length, plan_from_length=None, plan_num_rand_blocks=None, **kw, ) ctx = ctx.contiguous().view(b, from_seq_length, -1) return ctx, y @staticmethod def torch_bmm_nd(x1, x2, ndim=None): s1, s2 = x1.shape, x2.shape return torch.bmm(x1.reshape((-1,) + s1[-2:]), x2.reshape((-1,) + s2[-2:])).view( s1[: ndim - 2] + (s1[ndim - 2], s2[ndim - 1]), ) @staticmethod def torch_bmm_nd_transpose(x1, x2, ndim=None): s1, s2 = x1.shape, x2.shape return torch.bmm( x1.reshape((-1,) + s1[-2:]), x2.reshape((-1,) + s2[-2:]).transpose(1, 2), ).view(s1[: ndim - 2] + (s1[ndim - 2], s2[ndim - 2])) def bigbird_block_sparse_attention( self, q, k, v, band_m, from_m, to_m, from_blocked_mask, to_blocked_mask, d_head, 
from_block_size, to_block_size, batch_size, from_seq_len, to_seq_len, plan_from_length, plan_num_rand_blocks, **kw, ): cfg = self.cfg assert from_seq_len // from_block_size == to_seq_len // to_block_size rsqrt_d = 1 / (d_head**0.5) bsz = batch_size attn_mask_penalty = -10000.0 np.random.seed(cfg.seed) if from_seq_len in [1024, 3072, 4096]: rand_attn = [ self._bigbird_block_rand_mask( cfg.n_pos, cfg.n_pos, from_block_size, to_block_size, last_idx=1024, )[: (from_seq_len // from_block_size - 2)] for _ in range(cfg.n_heads) ] else: if plan_from_length is None: plan_from_length, plan_num_rand_blocks = self._get_rand_attn_plan( from_seq_len, from_block_size ) rand_attn = self._bigbird_block_rand_mask_with_head( from_seq_length=from_seq_len, to_seq_length=to_seq_len, from_block_size=from_block_size, to_block_size=to_block_size, plan_from_length=plan_from_length, plan_num_rand_blocks=plan_num_rand_blocks, ) rand_attn = np.stack(rand_attn, axis=0) rand_attn = torch.tensor(rand_attn, device=q.device, dtype=torch.long) rand_attn.unsqueeze_(0) rand_attn = torch.cat([rand_attn for _ in range(batch_size)], dim=0) rand_mask = self._create_rand_mask_from_inputs( from_blocked_mask, to_blocked_mask, rand_attn, bsz, from_seq_len, from_block_size, ) q = q.view(bsz, cfg.n_heads, from_seq_len // from_block_size, from_block_size, -1) blocked_key_matrix = k.view( bsz, cfg.n_heads, to_seq_len // to_block_size, to_block_size, -1 ) blocked_value_matrix = v.view( bsz, cfg.n_heads, to_seq_len // to_block_size, to_block_size, -1 ) gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn) gathered_key = gathered_key.view( bsz, cfg.n_heads, to_seq_len // to_block_size - 2, cfg.n_rand_blocks * to_block_size, -1 ) gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn) gathered_value = gathered_value.view( bsz, cfg.n_heads, to_seq_len // to_block_size - 2, cfg.n_rand_blocks * to_block_size, -1 ) first_product = self.torch_bmm_nd_transpose(q[:, :, 0], k, ndim=4) 
first_product = first_product * rsqrt_d first_product += (1.0 - to_m) * attn_mask_penalty first_attn_weights = F.softmax(first_product, dim=-1) first_context_layer = self.torch_bmm_nd(first_attn_weights, v, ndim=4) first_context_layer.unsqueeze_(2) second_key_mat = torch.cat( [ blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, 1], blocked_key_matrix[:, :, 2], blocked_key_matrix[:, :, -1], gathered_key[:, :, 0], ], dim=2, ) second_value_mat = torch.cat( [ blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, 1], blocked_value_matrix[:, :, 2], blocked_value_matrix[:, :, -1], gathered_value[:, :, 0], ], dim=2, ) second_product = self.torch_bmm_nd_transpose(q[:, :, 1], second_key_mat, ndim=4) second_seq_pad = torch.cat( [ to_m[:, :, :, : 3 * to_block_size], to_m[:, :, :, -to_block_size:], to_m.new_ones([bsz, 1, 1, cfg.n_rand_blocks * to_block_size]), ], dim=3, ) second_rand_pad = torch.cat( [ rand_mask.new_ones([bsz, cfg.n_heads, from_block_size, 4 * to_block_size]), rand_mask[:, :, 0], ], dim=3, ) second_product = second_product * rsqrt_d second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * attn_mask_penalty second_attn_weights = F.softmax(second_product, dim=-1) second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4) second_context_layer.unsqueeze_(2) exp_blocked_key_matrix = torch.cat( [ blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1], ], dim=3, ) exp_blocked_value_matrix = torch.cat( [ blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1], ], dim=3, ) middle_query_matrix = q[:, :, 2:-2] inner_band_product = self.torch_bmm_nd_transpose( middle_query_matrix, exp_blocked_key_matrix, ndim=5 ) inner_band_product = inner_band_product * rsqrt_d rand_band_product = self.torch_bmm_nd_transpose( middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5 ) rand_band_product = rand_band_product * rsqrt_d first_band_product = 
torch.einsum( "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, 0] ) first_band_product = first_band_product * rsqrt_d last_band_product = torch.einsum( "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, -1] ) last_band_product = last_band_product * rsqrt_d inner_band_product += (1.0 - band_m) * attn_mask_penalty first_band_product += (1.0 - to_m[:, :, :, :to_block_size].unsqueeze(3)) * attn_mask_penalty last_band_product += (1.0 - to_m[:, :, :, -to_block_size:].unsqueeze(3)) * attn_mask_penalty rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * attn_mask_penalty band_product = torch.cat( [first_band_product, inner_band_product, rand_band_product, last_band_product], dim=-1 ) attn_weights = F.softmax(band_product, dim=-1) ctx = self.torch_bmm_nd( attn_weights[:, :, :, :, to_block_size : 4 * to_block_size], exp_blocked_value_matrix, ndim=5, ) ctx += self.torch_bmm_nd( attn_weights[:, :, :, :, 4 * to_block_size : -to_block_size], gathered_value[:, :, 1:-1], ndim=5, ) ctx += torch.einsum( "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0], ) ctx += torch.einsum( "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1], ) second_last_key_mat = torch.cat( [ blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, -3], blocked_key_matrix[:, :, -2], blocked_key_matrix[:, :, -1], gathered_key[:, :, -1], ], dim=2, ) second_last_value_mat = torch.cat( [ blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, -3], blocked_value_matrix[:, :, -2], blocked_value_matrix[:, :, -1], gathered_value[:, :, -1], ], dim=2, ) second_last_product = self.torch_bmm_nd_transpose(q[:, :, -2], second_last_key_mat, ndim=4) second_last_seq_pad = torch.cat( [ to_m[:, :, :, :to_block_size], to_m[:, :, :, -3 * to_block_size :], to_m.new_ones([bsz, 1, 1, cfg.n_rand_blocks * to_block_size]), ], dim=3, ) second_last_rand_pad = torch.cat( [ rand_mask.new_ones([bsz, cfg.n_heads, 
from_block_size, 4 * to_block_size]), rand_mask[:, :, -1], ], dim=3, ) second_last_product = second_last_product * rsqrt_d second_last_product += ( 1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad) ) * attn_mask_penalty second_last_attn_weights = F.softmax(second_last_product, dim=-1) second_last_context_layer = self.torch_bmm_nd( second_last_attn_weights, second_last_value_mat, ndim=4 ) second_last_context_layer.unsqueeze_(2) last_product = self.torch_bmm_nd_transpose(q[:, :, -1], k, ndim=4) last_product = last_product * rsqrt_d last_product += (1.0 - to_m) * attn_mask_penalty last_attn_weights = F.softmax(last_product, dim=-1) last_context_layer = self.torch_bmm_nd(last_attn_weights, v, ndim=4) last_context_layer.unsqueeze_(2) ctx = torch.cat( [ first_context_layer, second_context_layer, ctx, second_last_context_layer, last_context_layer, ], dim=2, ) ctx = ctx.view((bsz, cfg.n_heads, from_seq_len, -1)) * from_m ctx = torch.transpose(ctx, 1, 2) y = torch.zeros( bsz, cfg.n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=ctx.device, ) y[:, :, :from_block_size, :] = first_attn_weights # all keys global y[:, :, from_block_size : 2 * from_block_size, : 3 * to_block_size] = second_attn_weights[ :, :, :, : 3 * to_block_size ] y[:, :, from_block_size : 2 * from_block_size, -to_block_size:] = second_attn_weights[ :, :, :, 3 * to_block_size : 4 * to_block_size ] for p1, i1, w1 in zip(range(bsz), rand_attn, second_attn_weights): for p2, i2, w2 in zip(range(cfg.n_heads), i1, w1): attn_probs_view = y.view( bsz, cfg.n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[:, 4 * to_block_size :] attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view( from_block_size, cfg.n_rand_blocks, to_block_size ) for i in range(from_seq_len // from_block_size - 4): attn_probs_view = y.view( bsz, cfg.n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, 
# NOTE(review): this chunk opens mid-method — everything below up to `return ctx, y`
# is the tail of a block-sparse-attention forward whose beginning precedes this
# chunk. Indentation of that tail is reconstructed best-effort; confirm against
# the full file.
)[:, :, 2:-2, :, 1:-1, :]
            # Scatter the band scores for row-block i into its 3 sliding
            # to-blocks (i-1, i, i+1) of the full attention-probability view.
            right_slice = attn_weights[:, :, i, :, to_block_size : 4 * to_block_size]
            attn_probs_view[:, :, i, :, i : i + 3, :] = right_slice.view(
                bsz, cfg.n_heads, from_block_size, 3, to_block_size
            )
        # First and last to-blocks are global: every middle from-block attends to both.
        y[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[
            :, :, :, :, :to_block_size
        ].view(bsz, cfg.n_heads, -1, to_block_size)
        y[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[
            :, :, :, :, -to_block_size:
        ].view(bsz, cfg.n_heads, -1, to_block_size)
        # Scatter the random-block scores: i2 (= rand_attn[batch][head]) lists the
        # randomly attended to-block ids for each middle row-block.
        for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights):
            for p2, i2, w2 in zip(range(cfg.n_heads), i1, w1):
                for i in range(1, len(i2) - 1):
                    attn_probs_view = y.view(
                        bsz,
                        cfg.n_heads,
                        from_seq_len // from_block_size,
                        from_block_size,
                        to_seq_len // to_block_size,
                        to_block_size,
                    )
                    right_slice = w2[i - 1, :, 4 * to_block_size : -to_block_size]
                    attn_probs_view[p1, p2, i + 1, :, i2[i]] = right_slice.view(
                        from_block_size, cfg.n_rand_blocks, to_block_size
                    )
        # Second-to-last row-block: global first to-block plus its 3-block band.
        y[:, :, -2 * from_block_size : -from_block_size, :to_block_size] = second_last_attn_weights[
            :, :, :, :to_block_size
        ]
        y[
            :, :, -2 * from_block_size : -from_block_size, -3 * to_block_size :
        ] = second_last_attn_weights[:, :, :, to_block_size : 4 * to_block_size]
        # Random blocks of the second-to-last row-block (index -2 in block view).
        for p1, i1, w1 in zip(range(bsz), rand_attn, second_last_attn_weights):
            for p2, i2, w2 in zip(range(cfg.n_heads), i1, w1):
                attn_probs_view = y.view(
                    bsz,
                    cfg.n_heads,
                    from_seq_len // from_block_size,
                    from_block_size,
                    to_seq_len // to_block_size,
                    to_block_size,
                )
                right_slice = w2[:, 4 * to_block_size :]
                attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view(
                    from_block_size, cfg.n_rand_blocks, to_block_size
                )
        # Last row-block attends to everything (fully global).
        y[:, :, -from_block_size:, :] = last_attn_weights
        return ctx, y

    @staticmethod
    def torch_gather_b2(params, indices):
        """Batched 2-level gather: per (batch, head), select rows of `params`
        along dim 2 using the flattened index grid `indices`.

        params:  tensor of shape (b, h, n, ...); rows are picked from dim 2.
        indices: integer tensor of shape (b, h, r, c) with values in [0, n).
        Returns a tensor of shape (b, h, r * c, ...).
        """
        assert params.shape[:2] == indices.shape[:2]
        num_indices_to_gather = indices.shape[-2] * indices.shape[-1]
        num_indices_to_pick_from = params.shape[2]
        # Offset each (batch, head) slot by its position so one flat
        # index_select over the collapsed leading dims does the whole gather.
        indices_shift = (
            torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device)
            // num_indices_to_gather
            * num_indices_to_pick_from
        )
        flattened_indices = indices.view(-1) + indices_shift
        flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1])
        y = flattened_params.index_select(0, flattened_indices)
        y = y.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:])
        return y

    @staticmethod
    def _create_rand_mask_from_inputs(
        from_blocked_mask,
        to_blocked_mask,
        rand_attn,
        num_rand_blocks,
        batch_size,
        from_seq_length,
        from_block_size,
    ):
        """Build the attention mask for the randomly selected to-blocks.

        Gathers, per batch element, the to-block mask rows named by `rand_attn`
        and combines them with the middle from-blocks' mask via einsum.
        """
        num_windows = from_seq_length // from_block_size - 2
        rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)])
        # NOTE(review): `n_heads` is neither a parameter nor defined in this
        # @staticmethod — this looks like a latent NameError; presumably it should
        # come from cfg / rand_attn's shape. Confirm against callers.
        rand_mask = rand_mask.view(batch_size, n_heads, num_windows, num_rand_blocks * from_block_size)
        rand_mask = torch.einsum("blq,bhlk->bhlqk", from_blocked_mask[:, 1:-1], rand_mask)
        return rand_mask

    @staticmethod
    def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks):
        """Plan how many random blocks to assign at which sequence lengths.

        Returns (plan_from_length, plan_num_rand_blocks): parallel lists where
        entry i means "up to from-length plan_from_length[i], draw
        plan_num_rand_blocks[i] random blocks per row-block".
        """
        plan_from_length = []
        plan_num_rand_blocks = []
        if (2 * num_rand_blocks + 5) < (from_seq_length // from_block_size):
            # Long sequence: all random blocks fit well before the full length.
            plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size))
            plan_num_rand_blocks.append(num_rand_blocks)
            plan_from_length.append(from_seq_length)
            plan_num_rand_blocks.append(0)
        elif (num_rand_blocks + 5) < (from_seq_length // from_block_size):
            # Medium sequence: split the random blocks across two stages.
            plan_from_length.append(int((num_rand_blocks + 5) * from_block_size))
            plan_num_rand_blocks.append(num_rand_blocks // 2)
            plan_from_length.append(from_seq_length)
            plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks // 2))
        else:
            # Short sequence: a single stage carrying every random block.
            plan_from_length.append(from_seq_length)
            plan_num_rand_blocks.append(num_rand_blocks)
        return plan_from_length, plan_num_rand_blocks

    @staticmethod
    def _bigbird_block_rand_mask(
        from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1
    ):
        """Sample, per middle row-block, `num_rand_blocks` random to-block ids.

        Returns an int32 array of shape (n_row_blocks - 2, num_rand_blocks); the
        first and last row-blocks are global and get no random blocks. When
        `last_idx` > 2 * to_block_size it caps the highest drawable to-block.
        Uses np.random, so results depend on the global numpy seed.
        """
        assert from_seq_length // from_block_size == to_seq_length // to_block_size
        rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32)
        # Candidate to-blocks: everything except the first and last (global) blocks.
        middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32)
        last = to_seq_length // to_block_size - 1
        if last_idx > (2 * to_block_size):
            last = (last_idx // to_block_size) - 1
        r = num_rand_blocks
        for i in range(1, from_seq_length // from_block_size - 1):
            # start/end bound the sliding window [i-1, i+1] in middle_seq
            # coordinates, so that window is excluded from the draw below.
            start = i - 2
            end = i
            if i == 1:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r]
            elif i == 2:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r]
            elif i == from_seq_length // from_block_size - 3:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
            elif i == from_seq_length // from_block_size - 2:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
            else:
                if start > last:
                    start = last
                    rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
                elif (end + 1) == last:
                    rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
                else:
                    # General case: draw from everything left of the window plus
                    # everything right of it (capped at `last`).
                    rand_attn[i - 1, :] = np.random.permutation(
                        np.concatenate((middle_seq[:start], middle_seq[end + 1 : last]))
                    )[:r]
        return rand_attn

    def _bigbird_block_rand_mask_with_head(
        self,
        from_seq_length,
        to_seq_length,
        from_block_size,
        to_block_size,
        plan_from_length,
        plan_num_rand_blocks,
        window_block_left=1,
        window_block_right=1,
        global_block_top=1,
        global_block_bottom=1,
        global_block_left=1,
        global_block_right=1,
    ):
        """Sample per-head random-attention block ids following a length plan.

        Returns a list with one int32 array per head, each of shape
        (num_blocks - global_block_top - global_block_bottom, total_rand_blocks),
        where every stage of the (plan_from_length, plan_num_rand_blocks) plan
        contributes its own column range. The window/global parameters mark
        blocks that must never be drawn.
        """
        cfg = self.cfg
        assert from_seq_length // from_block_size == to_seq_length // to_block_size
        assert from_seq_length in plan_from_length
        num_blocks = from_seq_length // from_block_size
        plan_block_length = np.array(plan_from_length) // from_block_size
        # Index of the plan stage that covers the full sequence length.
        max_plan_idx = plan_from_length.index(from_seq_length)
        rand_attn = [
            np.zeros((num_blocks, np.sum(plan_num_rand_blocks[: max_plan_idx + 1])), dtype=np.int32)
            for i in range(cfg.n_heads)
        ]
        for plan_idx in range(max_plan_idx + 1):
            rnd_r_cnt = 0
            if plan_idx > 0:
                # Rows before the previous stage boundary draw this stage's
                # columns from the newly unlocked to-block range.
                if plan_num_rand_blocks[plan_idx] > 0:
                    rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))
                    curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))
                    for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]):
                        for h in range(cfg.n_heads):
                            rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                                block_id=blk_rw_idx,
                                to_start_block_id=plan_block_length[plan_idx - 1],
                                to_end_block_id=plan_block_length[plan_idx],
                                num_rand_blocks=plan_num_rand_blocks[plan_idx],
                                window_block_left=window_block_left,
                                window_block_right=window_block_right,
                                global_block_left=global_block_left,
                                global_block_right=global_block_right,
                            )
                # Rows of the current stage also fill in the column ranges of
                # every earlier stage pl_id.
                for pl_id in range(plan_idx):
                    if plan_num_rand_blocks[pl_id] == 0:
                        continue
                    for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]):
                        rnd_r_cnt = 0
                        to_start_block_id = 0
                        if pl_id > 0:
                            rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id]))
                            to_start_block_id = plan_block_length[pl_id - 1]
                        curr_r_cnt = int(np.sum(plan_num_rand_blocks[: pl_id + 1]))
                        for h in range(cfg.n_heads):
                            rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                                block_id=blk_rw_idx,
                                to_start_block_id=to_start_block_id,
                                to_end_block_id=plan_block_length[pl_id],
                                num_rand_blocks=plan_num_rand_blocks[pl_id],
                                window_block_left=window_block_left,
                                window_block_right=window_block_right,
                                global_block_left=global_block_left,
                                global_block_right=global_block_right,
                            )
            if plan_num_rand_blocks[plan_idx] == 0:
                continue
            # Rows belonging to the current stage draw their own columns.
            curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))
            from_start_block_id = global_block_top
            to_start_block_id = 0
            if plan_idx > 0:
                rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))
                from_start_block_id = plan_block_length[plan_idx - 1]
                to_start_block_id = plan_block_length[plan_idx - 1]
            for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]):
                for h in range(cfg.n_heads):
                    rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                        block_id=blk_rw_idx,
                        to_start_block_id=to_start_block_id,
                        to_end_block_id=plan_block_length[plan_idx],
                        num_rand_blocks=plan_num_rand_blocks[plan_idx],
                        window_block_left=window_block_left,
                        window_block_right=window_block_right,
                        global_block_left=global_block_left,
                        global_block_right=global_block_right,
                    )
        # Drop the global top/bottom rows, which never use random attention.
        for nh in range(cfg.n_heads):
            rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :]
        return rand_attn

    @staticmethod
    def _get_single_block_row_attention(
        block_id,
        to_start_block_id,
        to_end_block_id,
        num_rand_blocks,
        window_block_left=1,
        window_block_right=1,
        global_block_left=1,
        global_block_right=1,
    ):
        """Draw up to `num_rand_blocks` random to-block ids for one row-block.

        Permutes [to_start_block_id, to_end_block_id) and keeps ids that are not
        "illegal": the sliding window around `block_id`, the global blocks at
        both ends, and (for the second / second-to-last rows) each other's
        mirror block. May return fewer than `num_rand_blocks` ids if the legal
        candidates run out.
        """
        to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32)
        perm_block = np.random.permutation(to_block_list)
        # Blocks already covered by window or global attention must not be drawn.
        illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1))
        illegal_blocks.extend(list(range(global_block_left)))
        illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id)))
        # The second and second-to-last rows additionally exclude each other.
        if block_id == 1:
            illegal_blocks.append(to_end_block_id - 2)
        if block_id == to_end_block_id - 2:
            illegal_blocks.append(1)
        selected_random_blokcs = []  # NOTE(review): "blokcs" typo kept — local name only.
        for i in range(to_end_block_id - to_start_block_id):
            if perm_block[i] not in illegal_blocks:
                selected_random_blokcs.append(perm_block[i])
            if len(selected_random_blokcs) == num_rand_blocks:
                break
        return np.array(selected_random_blokcs, dtype=np.int32)
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,454
quantapix/qnarre
refs/heads/main
/tools/triton/python/test/unit/language/print_helper.py
import sys import torch from torch.testing import assert_close import triton import triton.language as tl @triton.jit def kernel_device_print(X, Y, BLOCK: tl.constexpr): x = tl.load(X + tl.arange(0, BLOCK)) tl.device_print("", x) tl.store(Y + tl.arange(0, BLOCK), x) @triton.jit def kernel_print(X, Y, BLOCK: tl.constexpr): x = tl.load(X + tl.arange(0, BLOCK)) print("", x) tl.store(Y + tl.arange(0, BLOCK), x) @triton.jit def kernel_static_print(X, Y, BLOCK: tl.constexpr): x = tl.load(X + tl.arange(0, BLOCK)) tl.static_print(x) tl.store(Y + tl.arange(0, BLOCK), x) def test_print(func: str, data_type: str): shape = (128, ) # limit the range of integers so that the sum does not overflow x = torch.arange(0, shape[0], dtype=torch.int32, device='cuda').to(getattr(torch, data_type)) y = torch.zeros(shape, dtype=x.dtype, device="cuda") if func == "device_print": kernel_device_print[(1,)](x, y, BLOCK=shape[0]) elif func == "print": kernel_print[(1,)](x, y, BLOCK=shape[0]) elif func == "static_print": kernel_static_print[(1,)](x, y, BLOCK=shape[0]) assert_close(y, x) if __name__ == "__main__": test_print(sys.argv[1], sys.argv[2])
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,455
quantapix/qnarre
refs/heads/main
/qnarre/prep/feature/segformer.py
import numpy as np from PIL import Image from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ImageFeatureExtractionMixin, ImageInput, is_torch_tensor, ) from ...utils import logging logger = logging.get_logger(__name__) class SegformerFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): model_input_names = ["pixel_values"] def __init__( self, do_resize=True, size=512, resample=Image.BILINEAR, do_normalize=True, image_mean=None, image_std=None, reduce_labels=False, **kw, ): super().__init__(**kw) self.do_resize = do_resize self.size = size self.resample = resample self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.reduce_labels = reduce_labels def __call__( self, images: ImageInput, segmentation_maps: ImageInput = None, return_tensors=None, **kw, ): valid_images = False valid_segmentation_maps = False # Check that images has a valid type if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images): valid_images = True elif isinstance(images, (list, tuple)): if ( len(images) == 0 or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]) ): valid_images = True if not valid_images: raise ValueError( "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example)," "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)." 
) # Check that segmentation maps has a valid type if segmentation_maps is not None: if isinstance(segmentation_maps, (Image.Image, np.ndarray)) or is_torch_tensor( segmentation_maps ): valid_segmentation_maps = True elif isinstance(segmentation_maps, (list, tuple)): if ( len(segmentation_maps) == 0 or isinstance(segmentation_maps[0], (Image.Image, np.ndarray)) or is_torch_tensor(segmentation_maps[0]) ): valid_segmentation_maps = True if not valid_segmentation_maps: raise ValueError( "Segmentation maps must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example)," "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)." ) is_batched = bool( isinstance(images, (list, tuple)) and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0])) ) if not is_batched: images = [images] if segmentation_maps is not None: segmentation_maps = [segmentation_maps] # reduce zero label if needed if self.reduce_labels: if segmentation_maps is not None: for idx, map in enumerate(segmentation_maps): if not isinstance(map, np.ndarray): map = np.array(map) # avoid using underflow conversion map[map == 0] = 255 map = map - 1 map[map == 254] = 255 segmentation_maps[idx] = Image.fromarray(map.astype(np.uint8)) # transformations (resizing + normalization) if self.do_resize and self.size is not None: images = [ self.resize(image=image, size=self.size, resample=self.resample) for image in images ] if segmentation_maps is not None: segmentation_maps = [ self.resize(map, size=self.size, resample=Image.NEAREST) for map in segmentation_maps ] if self.do_normalize: images = [ self.normalize(image=image, mean=self.image_mean, std=self.image_std) for image in images ] # return as BatchFeature data = {"pixel_values": images} if segmentation_maps is not None: labels = [] for map in segmentation_maps: if not isinstance(map, np.ndarray): map = np.array(map) labels.append(map.astype(np.int64)) # cast to np.int64 data["labels"] = 
labels encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) return encoded_inputs
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,456
quantapix/qnarre
refs/heads/main
/qnarre/prep/dataset/samsum.py
# Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import json import py7zr import datasets as ds _URLS = "https://arxiv.org/src/1911.12237v2/anc/corpus.7z" class Samsum(ds.GeneratorBasedBuilder): BUILDER_CONFIGS = [ds.BuilderConfig(name="samsum", version=ds.Version("1.1.0"))] def _info(self): return ds.DatasetInfo( description="", citation="", homepage="", license="", features=ds.Features( { "id": ds.Value("string"), "dialogue": ds.Value("string"), "summary": ds.Value("string"), } ), ) def _split_generators(self, mgr): path = mgr.download_and_extract(_URLS) return [ ds.SplitGenerator( name=ds.Split.TRAIN, gen_kw={"filepath": (path, "train.json"), "split": "train"}, ), ds.SplitGenerator( name=ds.Split.TEST, gen_kw={"filepath": (path, "test.json"), "split": "test"}, ), ds.SplitGenerator( name=ds.Split.VALIDATION, gen_kw={"filepath": (path, "val.json"), "split": "val"}, ), ] def _generate_examples(self, filepath, _): path, fname = filepath with py7zr.SevenZipFile(path, "r") as z: for name, bio in z.readall().items(): if name == fname: data = json.load(bio) for e in data: yield e["id"], e
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,457
quantapix/qnarre
refs/heads/main
/tools/triton/python/triton/runtime/jit.py
from __future__ import annotations, division

import ast
import functools
import hashlib
import inspect
import os
import subprocess
import textwrap
from collections import defaultdict, namedtuple
from typing import Callable, Generic, Iterable, Optional, TypeVar, Union, cast, overload

import triton


def get_cuda_stream(idx=None):
    # Prefer the private fast-path accessor; fall back to the public torch API
    # when it is unavailable (older torch builds).
    if idx is None:
        idx = get_current_device()
    try:
        from torch._C import _cuda_getCurrentRawStream
        return _cuda_getCurrentRawStream(idx)
    except ImportError:
        import torch
        return torch.cuda.current_stream(idx).cuda_stream


def get_current_device():
    import torch
    return torch.cuda.current_device()


def set_current_device(idx):
    import torch
    torch.cuda.set_device(idx)


def get_device_capability(idx):
    import torch
    return torch.cuda.get_device_capability(idx)


T = TypeVar('T')

# -----------------------------------------------------------------------------
# Dependencies Finder
# -----------------------------------------------------------------------------


class DependenciesFinder(ast.NodeVisitor):
    """
    This AST visitor is used to find dependencies of a JITFunction. This can
    be used to invalidate a JITFunction's hash when its source code -- or
    that of its dependencies -- changes.
    """

    def __init__(self, globals, src) -> None:
        super().__init__()
        # Seed the hash with the function's own source text.
        self.ret = hashlib.md5(src.encode("utf-8")).hexdigest()
        self.globals = globals

    def visit_Name(self, node):
        # Resolve a bare name against the function's globals (None if absent).
        return self.globals.get(node.id, None)

    def visit_Attribute(self, node):
        # Walk down chained attribute accesses to their root object.
        lhs = self.visit(node.value)
        while isinstance(lhs, ast.Attribute):
            lhs = self.visit(lhs.value)
        if lhs is None or lhs is triton:
            return None
        return getattr(lhs, node.attr)

    def visit_Call(self, node):
        # Every call target that is itself a JITFunction contributes its hash,
        # so that editing a callee invalidates the caller's cache entry.
        func = self.visit(node.func)
        if func is None:
            return
        if inspect.isbuiltin(func):
            return
        if func.__module__ and func.__module__.startswith('triton.'):
            return
        assert isinstance(func, JITFunction), f"Function \"{func.__name__}\" is being called from a Triton function but is not a Triton function itself. Decorate it with @triton.jit to fix this"
        if func.hash is None:
            tree = ast.parse(func.src)
            finder = DependenciesFinder(func.__globals__, func.src)
            finder.visit(tree)
            func.hash = finder.ret
        noinline = str(getattr(func, 'noinline', False))
        # Fold the callee's hash (and its noinline flag) into our own.
        self.ret = (self.ret + func.hash + noinline).encode("utf-8")
        self.ret = hashlib.md5(self.ret).hexdigest()


# -----------------------------------------------------------------------------
# JITFunction
# -----------------------------------------------------------------------------


@functools.lru_cache()
def version_key():
    """Build a cache-busting key from the md5 of every triton source/binary
    component plus the ptxas version, so stale kernels are never reused
    across installs."""
    import pkgutil
    contents = []
    # frontend
    with open(__file__, "rb") as f:
        contents += [hashlib.md5(f.read()).hexdigest()]
    # compiler
    compiler_path = os.path.join(*triton.__path__, 'compiler')
    for lib in pkgutil.iter_modules([compiler_path]):
        with open(lib.module_finder.find_spec(lib.name).origin, "rb") as f:
            contents += [hashlib.md5(f.read()).hexdigest()]
    # backend
    with open(triton._C.libtriton.__file__, "rb") as f:
        contents += [hashlib.md5(f.read()).hexdigest()]
    # language
    language_path = os.path.join(*triton.__path__, 'language')
    for lib in pkgutil.iter_modules([language_path]):
        with open(lib.module_finder.find_spec(lib.name).origin, "rb") as f:
            contents += [hashlib.md5(f.read()).hexdigest()]
    # ptxas version -- best-effort: absent ptxas yields an empty component
    try:
        ptxas_version = hashlib.md5(subprocess.check_output(["ptxas", "--version"])).hexdigest()
    except Exception:
        ptxas_version = ''
    return '-'.join(triton.__version__) + '-' + ptxas_version + '-' + '-'.join(contents)


class KernelInterface(Generic[T]):
    run: T

    def __getitem__(self, grid) -> T:
        """
        A JIT function is launched with: fn[grid](*args, **kwargs).
        Hence JITFunction.__getitem__ returns a callable proxy that
        memorizes the grid.
        """
        return cast(T, functools.partial(cast(Callable, self.run), grid=grid))


class JITFunction(KernelInterface[T]):
    # Hook for inspecting compiled functions and modules
    cache_hook = None
    divisibility = 16

    @staticmethod
    def _key_of(arg):
        # Map a runtime argument to the type string used in the cache key.
        if hasattr(arg, "dtype"):
            return arg.dtype
        elif isinstance(arg, bool):
            return "i1"
        elif isinstance(arg, int):
            if -2**31 <= arg and arg <= 2**31 - 1:
                return "i32"
            elif 2**63 <= arg and arg <= 2**64 - 1:
                return "u64"
            else:
                return "i64"
        elif isinstance(arg, float):
            return 'fp32'
        elif arg is None:
            return None
        else:
            raise TypeError(f'Unsupported type {type(arg)} for {arg}')

    @staticmethod
    def _spec_of(arg):
        # Specialization key: pointer alignment for tensors, (divisible-by-16,
        # equals-1) for ints, otherwise whether the value is None.
        if hasattr(arg, "data_ptr"):
            return (arg.data_ptr() % JITFunction.divisibility == 0)
        elif isinstance(arg, int):
            return (arg % 16 == 0, arg == 1)
        return (arg is None, )

    def _get_config(self, *args):
        # Compute the instance descriptor (which args are 16-divisible / equal
        # to 1), skipping indices the user marked as do_not_specialize.
        def is_divisible_by_16(x):
            if hasattr(x, "data_ptr"):
                return x.data_ptr() % JITFunction.divisibility == 0
            elif isinstance(x, int):
                return x % JITFunction.divisibility == 0
            if x is None:
                return True
            return False
        divisible_by_16 = {i for i, arg in enumerate(args) if is_divisible_by_16(arg) and i not in self.do_not_specialize}
        equal_to_1 = {i for i, arg in enumerate(args) if not isinstance(arg, bool) and isinstance(arg, int) and arg == 1 and i not in self.do_not_specialize}
        return namedtuple("instance_descriptor", ["divisible_by_16", "equal_to_1"])(tuple(divisible_by_16), tuple(equal_to_1))
        # return _triton.code_gen.instance_descriptor(divisible_by_16, equal_to_1)

    @staticmethod
    def _type_of(key):
        # None are nullptr -- implicitly converted to *i8
        if key is None:
            return '*i8'
        dtype_str = str(key).split(".")[-1]
        tys = {
            "bool": "i1",
            "float8e5": "fp8e5",
            "float8e4": "fp8e4",
            "float16": "fp16",
            "bfloat16": "bf16",
            "float32": "fp32",
            "float64": "fp64",
            "int8": "i8",
            "int16": "i16",
            "int32": "i32",
            "int64": "i64",
            "uint8": "u8",
            "uint16": "u16",
            "uint32": "u32",
            "uint64": "u64",
        }
        # reinterpret can create triton type
        for v in list(tys.values()):
            tys[v] = v
        return key if isinstance(key, str) else f"*{tys[dtype_str]}"

    def _make_signature(self, sig_key):
        signature = ",".join([self._type_of(k) for i, k in enumerate(sig_key)])
        return signature

    def _make_constants(self, constexpr_key):
        constants = dict(zip(self.constexprs, constexpr_key))
        return constants

    def _call_hook(self, key, signature, device, constants, num_warps, num_stages, extern_libs, configs):
        # Invoke the user-installed cache hook, if any; a truthy return skips
        # compilation.
        if JITFunction.cache_hook is None:
            return False
        name = self.fn.__name__
        module = self.fn.__module__
        arg_reprs = ', '.join([f'{name}: {ty}' for name, ty in zip(self.arg_names, key[1])])
        repr = f"{name}[num_warps={num_warps}, num_stages={num_stages}]({arg_reprs})"
        key = str(key)

        class LegacyCompiler:
            def __init__(self, module, name):
                self.module = module
                self.name = name
                pass

        kwargs = dict(signature=signature, device=device, constants=constants,
                      num_warps=num_warps, num_stages=num_stages, extern_libs=extern_libs,
                      configs=configs)
        return JITFunction.cache_hook(key=key, repr=repr, fn=LegacyCompiler(module, name),
                                      compile={"key": key, **kwargs}, is_manual_warmup=False,
                                      already_compiled=False)

    def _get_arg_specialization_key(self, arg) -> str:
        # Emit the source-code fragment that computes an argument's
        # specialization key inside the generated launcher.
        arg_annotation = self.__annotations__.get(arg, '')
        if arg_annotation == '':
            return f'({arg}.data_ptr() % {JITFunction.divisibility} == 0) if hasattr({arg}, "data_ptr") \
                    else ({arg} % {JITFunction.divisibility} == 0, {arg} == 1) if isinstance({arg}, int) \
                    else (False,)'
        elif 'Tensor' in arg_annotation:
            return f'({arg}.data_ptr() % {JITFunction.divisibility} == 0)'
        elif arg_annotation == 'int':
            return f'({arg} % {JITFunction.divisibility} == 0, {arg} == 1)'
        else:
            return '(False,)'

    def _get_arg_sig_key(self, arg) -> str:
        # Emit the source-code fragment that computes an argument's signature
        # key inside the generated launcher.
        arg_annotation = self.__annotations__.get(arg, '')
        if 'Tensor' in arg_annotation:
            return f'{arg}.dtype'
        elif arg_annotation == 'bool':
            return "i1"
        elif arg_annotation == 'float':
            return 'fp32'
        else:
            return f'_key_of({arg})'

    def _make_launcher(self):
        """Generate (via exec) a specialized launcher function for this
        kernel: it builds the cache key from the concrete arguments, reuses a
        cached binary when possible, and compiles otherwise."""
        regular_args = [f'{arg}' for i, arg in enumerate(self.arg_names) if i not in self.constexprs]
        constexpr_args = [f'{arg}' for i, arg in enumerate(self.arg_names) if i in self.constexprs]
        args = ', '.join(regular_args)
        # cache key for regular argument type
        sig_keys = ', '.join([self._get_arg_sig_key(arg) for arg in regular_args])
        # cache key for constexpr argument values
        constexpr_keys = ', '.join(constexpr_args)
        # cache key for argument specialization
        specializations = []
        for i, arg in enumerate(regular_args):
            if i in self.do_not_specialize:
                continue
            specializations += [self._get_arg_specialization_key(arg)]
        spec_keys = ', '.join(specializations)
        grid_args = ','.join([f'"{arg}": {arg}' for arg in self.arg_names])

        src = f"""
def {self.fn.__name__}({', '.join(self.arg_names)}, grid, num_warps=4, num_stages=3, extern_libs=None, stream=None, warmup=False, device=None):
    sig_key = {sig_keys},
    constexpr_key = {f'{constexpr_keys},' if len(constexpr_keys) > 0 else ()}
    spec_key = {f'{spec_keys},' if len(spec_keys) > 0 else ()}
    key = (version_key, sig_key, constexpr_key, spec_key, num_warps, num_stages, self.debug)
    if not extern_libs is None:
        key = (key, tuple(extern_libs.items()))
    assert num_warps > 0 and (num_warps & (num_warps - 1)) == 0, "num_warps must be a power of 2"
    if callable(grid):
        grid = grid({{{grid_args}}})
    grid_size = len(grid)
    grid_0 = grid[0]
    grid_1 = grid[1] if grid_size > 1 else 1
    grid_2 = grid[2] if grid_size > 2 else 1
    if device is None:
        device = get_current_device()
        set_current_device(device)
    if stream is None and not warmup:
        stream = get_cuda_stream(device)
    bin = cache[device].get(key, None)
    if bin is not None:
        if not warmup:
            bin.c_wrapper(grid_0, grid_1, grid_2, bin.num_warps, bin.shared, stream, bin.cu_function, triton.compiler.CompiledKernel.launch_enter_hook, triton.compiler.CompiledKernel.launch_exit_hook, bin, {args})
        return bin
    # kernel not cached -- compile
    else:
        # build dict of constant values
        args = [{args}]
        all_args = {', '.join([f'{arg}' for arg in self.arg_names])},
        configs = self._get_config(*all_args),
        constants = self._make_constants(constexpr_key)
        constants.update({{i: None for i, arg in enumerate(all_args) if arg is None}})
        constants.update({{i: 1 for i in configs[0].equal_to_1}})
        # build kernel signature -- doesn't include specialized arguments
        signature = {{ i: self._type_of(_key_of(arg)) for i, arg in enumerate(all_args) if i not in self.constexprs }}
        # build stub signature -- includes arguments that are specialized
        for i, arg in constants.items():
            if callable(arg):
                raise TypeError(f"Callable constexpr at index {{i}} is not supported")
        if not self._call_hook(key, signature, device, constants, num_warps, num_stages, extern_libs, configs):
            bin = triton.compile(self, signature=signature, device=device, constants=constants, num_warps=num_warps, num_stages=num_stages, extern_libs=extern_libs, configs=configs, debug=self.debug)
            if not warmup:
                bin.c_wrapper(grid_0, grid_1, grid_2, bin.num_warps, bin.shared, stream, bin.cu_function, triton.compiler.CompiledKernel.launch_enter_hook, triton.compiler.CompiledKernel.launch_exit_hook, bin, *args)
            self.cache[device][key] = bin
            return bin
        return None
"""
        scope = {"version_key": version_key(),
                 "get_cuda_stream": get_cuda_stream,
                 "self": self,
                 "_spec_of": self._spec_of,
                 "_key_of": self._key_of,
                 "cache": self.cache,
                 "triton": triton,
                 "get_current_device": get_current_device,
                 "set_current_device": set_current_device}
        exec(src, scope)
        return scope[self.fn.__name__]

    def __init__(self, fn, version=None, do_not_specialize=None, debug=None, noinline=None):
        self.fn = fn
        self.module = fn.__module__
        self.version = version
        # function signature information
        signature = inspect.signature(fn)
        self.arg_names = [v.name for v in signature.parameters.values()]
        self.has_defaults = any(v.default != inspect._empty for v in signature.parameters.values())
        # specialization hints -- accepted as names or positional indices,
        # normalized here to a set of indices
        self.do_not_specialize = [] if do_not_specialize is None else do_not_specialize
        self.do_not_specialize = {self.arg_names.index(arg) if isinstance(arg, str) else arg for arg in self.do_not_specialize}
        # function source code (without decorators)
        self.src = textwrap.dedent(inspect.getsource(fn))
        self.src = self.src[self.src.find("def"):]
        # cache of just-in-time compiled kernels
        self.cache = defaultdict(dict)
        self.hash = None
        # JITFunction can be instantiated as kernel
        # when called with a grid using __getitem__
        self.kernel_decorators = []
        self.kernel = None
        self.debug = os.environ.get("TRITON_DEBUG", "0") == "1" if debug is None else debug
        self.noinline = noinline
        # annotations
        normalize_ty = lambda ty: ty.__name__ if isinstance(ty, type) else ty
        self.__annotations__ = {name: normalize_ty(ty) for name, ty in fn.__annotations__.items()}
        # index of constexprs
        self.constexprs = [self.arg_names.index(name) for name, ty in self.__annotations__.items() if 'constexpr' in ty]
        # launcher
        self.run = self._make_launcher()
        # re-use docs of wrapped function
        self.__doc__ = fn.__doc__
        self.__name__ = fn.__name__
        self.__globals__ = fn.__globals__
        self.__module__ = fn.__module__

    @property
    def cache_key(self):
        # TODO : hash should be attribute of `self`
        if self.hash is None:
            dependencies_finder = DependenciesFinder(globals=self.__globals__, src=self.src)
            dependencies_finder.visit(self.parse())
            self.hash = dependencies_finder.ret + version_key()
        return self.hash

    def warmup(self, *args, **kwargs):
        # Compile without launching; dtypes may stand in for real tensors.
        return self.run(*map(MockTensor.wrap_dtype, args), **kwargs, warmup=True)

    # we do not parse `src` in the constructor because
    # the user might want to monkey-patch self.src dynamically.
    # Our unit tests do this, for example.
    def parse(self):
        tree = ast.parse(self.src)
        assert isinstance(tree, ast.Module)
        assert len(tree.body) == 1
        assert isinstance(tree.body[0], ast.FunctionDef)
        return tree

    def __call__(self, *args, **kwargs):
        raise RuntimeError("Cannot call @triton.jit'd outside of the scope of a kernel")

    def __setattr__(self, name, value):
        # - when kernel decorators change, cached kernel
        #   needs to be cleared
        if name == 'kernel_decorators':
            self.kernel = None
        super(JITFunction, self).__setattr__(name, value)
        # - when `.src` attribute is set, cache path needs
        #   to be reinitialized
        if name == 'src':
            self.hash = None

    def __repr__(self):
        return f"JITFunction({self.module}:{self.fn.__name__})"


# -----------------------------------------------------------------------------
# `jit` decorator
# -----------------------------------------------------------------------------


@overload
def jit(fn: T) -> JITFunction[T]:
    ...


@overload
def jit(
    *,
    version=None,
    do_not_specialize: Optional[Iterable[int]] = None,
    debug: Optional[bool] = None,
    noinline: Optional[bool] = None,
) -> Callable[[T], JITFunction[T]]:
    ...


def jit(
    fn: Optional[T] = None,
    *,
    version=None,
    do_not_specialize: Optional[Iterable[int]] = None,
    debug: Optional[bool] = None,
    noinline: Optional[bool] = None,
    interpret: Optional[bool] = None,
) -> Union[JITFunction[T], Callable[[T], JITFunction[T]]]:
    """
    Decorator for JIT-compiling a function using the Triton compiler.

    :note: When a jit'd function is called, arguments are
        implicitly converted to pointers if they have a :code:`.data_ptr()` method
        and a `.dtype` attribute.

    :note: This function will be compiled and run on the GPU. It will only have access to:

           * python primitives,
           * builtins within the triton package,
           * arguments to this function,
           * other jit'd functions

    :param fn: the function to be jit-compiled
    :type fn: Callable
    """

    def decorator(fn: T) -> JITFunction[T]:
        assert callable(fn)
        if interpret:
            from ..debugger.debugger import GridSelector
            return GridSelector(fn)
        else:
            return JITFunction(
                fn,
                version=version,
                do_not_specialize=do_not_specialize,
                debug=debug,
                noinline=noinline,
            )

    # Support both bare `@jit` and parameterized `@jit(...)` usage.
    if fn is not None:
        return decorator(fn)
    else:
        return decorator


# -----------------------------------------------------------------------------
# Utilities for mocking tensors
# -----------------------------------------------------------------------------


class MockTensor:
    """
    Can be used in place of real tensors when calling:
        kernel.warmup(MockTensor(torch.float32), ...)
    """

    @staticmethod
    def wrap_dtype(arg):
        if arg.__class__.__name__ == "dtype" and \
                arg.__module__ == "torch":
            return MockTensor(arg)
        return arg

    def __init__(self, dtype):
        self.dtype = dtype

    @staticmethod
    def data_ptr():
        return 0  # optimistically assumes multiple of 16


class TensorWrapper:
    """View of a tensor reinterpreted under a different dtype; delegates
    storage access to the wrapped base tensor."""

    def __init__(self, base, dtype):
        self.dtype = dtype
        self.base = base
        self.is_cuda = base.is_cuda
        self.device = base.device

    def data_ptr(self):
        return self.base.data_ptr()

    def __str__(self) -> str:
        return f'TensorWrapper[{self.dtype}]({self.base})'


def reinterpret(tensor, dtype):
    """Reinterpret `tensor` as `dtype` without copying, unwrapping an
    existing TensorWrapper when the target dtype matches its base."""
    if isinstance(tensor, TensorWrapper):
        if dtype == tensor.base.dtype:
            # Reinterpreting to the original interpretation; return the base.
            return tensor.base
        else:
            # Reinterpreting a wrapped tensor to a different type.
            return TensorWrapper(tensor.base, dtype)
    elif hasattr(tensor, "data_ptr"):
        # A new wrapper is needed around an unwrapped tensor.
        return TensorWrapper(tensor, dtype)
    else:
        raise TypeError(f'Cannot reinterpret a {type(tensor)}.')
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}
33,458
quantapix/qnarre
refs/heads/main
/qnarre/prep/tokens/fast/roberta.py
# Copyright 2022 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import json from tokenizers import pre_tokenizers, processors from ....tokens.base import AddedToken from ....tokens.fast import PreTrainedTokenizerFast from ..roberta import Tokenizer as Roberta VOCAB_FS = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json", } VOCAB_MAP = { "vocab_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json", }, "merges_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt", "roberta-base-openai-detector": 
"https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt", }, "tokenizer_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json", }, } INPUT_CAPS = { "roberta-base": 512, "roberta-large": 512, "roberta-large-mnli": 512, "distilroberta-base": 512, "roberta-base-openai-detector": 512, "roberta-large-openai-detector": 512, } class Tokenizer(PreTrainedTokenizerFast): vocab_fs = VOCAB_FS vocab_map = VOCAB_MAP input_caps = INPUT_CAPS model_input_names = ["input_ids", "mask"] slow_tokenizer_class = Roberta def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos="<s>", eos="</s>", sep="</s>", cls="<s>", unk="<unk>", pad="<pad>", msk="<mask>", add_prefix_space=False, trim_offsets=True, **kw, ): super().__init__( vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos=bos, eos=eos, sep=sep, cls=cls, unk=unk, pad=pad, msk=msk, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kw, ) pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space: pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type")) pre_tok_state["add_prefix_space"] = add_prefix_space self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state) 
self.add_prefix_space = add_prefix_space tokenizer_component = "post_processor" tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None) if tokenizer_component_instance: state = json.loads(tokenizer_component_instance.__getstate__()) if "sep" in state: state["sep"] = tuple(state["sep"]) if "cls" in state: state["cls"] = tuple(state["cls"]) changes_to_apply = False if state.get("add_prefix_space", add_prefix_space) != add_prefix_space: state["add_prefix_space"] = add_prefix_space changes_to_apply = True if state.get("trim_offsets", trim_offsets) != trim_offsets: state["trim_offsets"] = trim_offsets changes_to_apply = True if changes_to_apply: component_class = getattr(processors, state.pop("type")) new_value = component_class(**state) setattr(self.backend_tokenizer, tokenizer_component, new_value) @property def msk(self): if self._mask_token is None and self.verbose: logger.error("Using msk, but it is not set yet.") return None return str(self._mask_token) @msk.setter def msk(self, value): value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value self._mask_token = value def _batch_encode_plus(self, *args, **kw): is_split_into_words = kw.get("is_split_into_words", False) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*args, **kw) def _encode_plus(self, *args, **kw): is_split_into_words = kw.get("is_split_into_words", False) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._encode_plus(*args, **kw) def save_vocabulary(self, dir, pre=None): return tuple(self._tokenizer.model.save(dir, name=pre)) def build_inputs_with_special_tokens(self, toks_0, toks_1=None): y = [self.BOS] + toks_0 + [self.EOS] if toks_1 is None: return y return y + [self.EOS] + toks_1 + [self.EOS] def create_token_type_ids_from_sequences(self, toks_0, toks_1=None): sep = [self.SEP] cls = [self.cls_token_id] if toks_1 is None: return len(cls + toks_0 + sep) * [0] return len(cls + toks_0 + sep + sep + toks_1 + sep) * [0]
{"/qnarre/prep/convert/xlnet.py": ["/qnarre/prep/config/xlnet.py", "/qnarre/models/xlnet.py"], "/qnarre/prep/convert/bert.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/base/doc/patcher.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/nominals.py"], "/qnarre/prep/convert/funnel.py": ["/qnarre/prep/config/funnel.py", "/qnarre/models/funnel.py"], "/qnarre/prep/tokens/fast/realm.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py", "/qnarre/tokens/utils.py"], "/qnarre/models/decision_transfo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/decision_transfo.py"], "/qnarre/models/fsmt.py": ["/qnarre/core/embed.py"], "/qnarre/models/roformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/xlnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc.py": ["/qnarre/base/author.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/ops/blocksparse/__init__.py": ["/tools/triton/python/triton/ops/blocksparse/softmax.py"], "/qnarre/base/doc/analyzer.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/contain.py"], "/qnarre/prep/tokens/mpnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/prophetnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/gpt2.py": ["/qnarre/core/mlp.py"], "/qnarre/models/old/convert.py": ["/qnarre/core/utils.py"], "/qnarre/prep/convert/mbart.py": ["/qnarre/prep/config/mbart.py"], "/tools/triton/python/triton/language/semantic.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/prep/tokens/dpr.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/junk.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/reader.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/header.py", 
"/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/mboxes.py"], "/qnarre/base/doc/context.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/recs.py", "/qnarre/base/doc/filters.py", "/qnarre/base/doc/content.py", "/qnarre/base/doc/category.py"], "/qnarre/prep/tokens/rembert.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/debugger/debugger.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py", "/tools/triton/python/triton/debugger/tl_lang.py"], "/qnarre/prep/convert/reformer.py": ["/qnarre/prep/config/reformer.py", "/qnarre/models/reformer.py"], "/qnarre/prep/tokens/perceiver.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/modeling_utils.py": ["/qnarre/core/utils.py"], "/qnarre/tokens/utils.py": ["/qnarre/tokens/base.py"], "/tools/triton/python/triton/language/__init__.py": ["/tools/triton/python/triton/language/standard.py", "/tools/triton/python/triton/language/core.py", "/tools/triton/python/triton/language/random.py"], "/qnarre/base/doc/contain.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/t5.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/t5.py"], "/tools/triton/python/triton/language/core.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/models/megatron.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/megatron.py"], "/qnarre/models/fnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/record.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/header.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py"], "/qnarre/base/doc/dispatch.py": 
["/qnarre/base/doc/blog.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/context.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/analyzer.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/images.py"], "/qnarre/models/big_bird.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/big_bird.py"], "/tools/triton/python/triton/runtime/jit.py": ["/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/fast/roberta.py": ["/qnarre/tokens/base.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/bigbird.py": ["/qnarre/prep/config/big_bird.py", "/qnarre/models/big_bird.py"], "/qnarre/models/gpt_neo.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/gpt_neo.py"], "/qnarre/prep/convert/t5.py": ["/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/prep/tokens/fsmt.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/deberta.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/fast/splinter.py": ["/qnarre/tokens/fast.py"], "/qnarre/models/old/trafo.py": ["/qnarre/core/attention.py", "/qnarre/core/base.py", "/qnarre/core/mlp.py", "/qnarre/core/deduce.py", "/qnarre/core/norm.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/led.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/realm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/convbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/data2vec.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/tools/triton/python/triton/language/math.py": ["/tools/triton/python/triton/language/__init__.py"], "/qnarre/run/beam.py": ["/qnarre/run/qa.py"], "/tools/triton/python/triton/compiler/compiler.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/cache.py", 
"/tools/triton/python/triton/tools/disasm.py", "/tools/triton/python/triton/compiler/code_generator.py", "/tools/triton/python/triton/compiler/make_launcher.py"], "/qnarre/base/doc/graph.py": ["/qnarre/base/doc/base.py"], "/qnarre/base/activism.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/judgment.py"], "/qnarre/base/doc/exporter.py": ["/qnarre/base/doc/base.py"], "/qnarre/prep/tokens/fast/fnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/roformer.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/megatron.py": ["/qnarre/prep/config/megatron.py", "/qnarre/models/megatron.py"], "/qnarre/prep/tokens/fast/roformer.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/roformer.py"], "/qnarre/core/runner.py": ["/qnarre/core/params.py"], "/qnarre/run/seq2seq.py": ["/qnarre/run/qa.py"], "/qnarre/prep/tokens/luke.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/util/table.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/tree.py", "/qnarre/base/doc/util/utils.py"], "/tools/triton/python/triton/language/standard.py": ["/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/language/__init__.py"], "/qnarre/base/doc/filters.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/args.py"], "/qnarre/prep/convert/gpt_neo.py": ["/qnarre/prep/config/gpt_neo.py", "/qnarre/models/gpt_neo.py"], "/qnarre/base/doc/section.py": ["/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/message.py"], "/qnarre/models/funnel.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/funnel.py"], "/tools/triton/python/triton/common/__init__.py": ["/tools/triton/python/triton/common/build.py"], "/qnarre/base/doc/qnn.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/mboxes.py", "/qnarre/base/doc/log.py", 
"/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/models/plbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/reformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/images.py": ["/qnarre/base/doc/counter.py", "/qnarre/base/doc/base.py"], "/qnarre/models/yoso.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/canine.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/doc/blog.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/counter.py"], "/qnarre/models/longformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/fast/pegasus.py": ["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/pegasus.py"], "/qnarre/base/judgment.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py", "/qnarre/base/conflict.py", "/qnarre/base/conjecture.py"], "/qnarre/models/splinter.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/rag.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/convert/albert.py": ["/qnarre/prep/config/albert.py", "/qnarre/models/albert.py"], "/qnarre/prep/tokens/fast/gpt2.py": ["/qnarre/tokens/fast.py"], "/qnarre/prep/tokens/fast/mbart.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/prep/convert/roformer.py": ["/qnarre/models/roformer.py"], "/qnarre/base/doc/util/tree.py": ["/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py", "/qnarre/base/doc/util/utils.py"], "/qnarre/models/rembert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/t5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/tokens/xlm.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/bert_tf2.py": ["/qnarre/prep/config/bert.py", 
"/qnarre/models/bert.py"], "/tools/triton/python/triton/ops/__init__.py": ["/tools/triton/python/triton/ops/cross_entropy.py", "/tools/triton/python/triton/ops/matmul.py"], "/qnarre/core/norm.py": ["/qnarre/core/base.py"], "/qnarre/models/mpnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/author.py": ["/qnarre/base/named.py"], "/qnarre/base/doc/args.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/log.py"], "/qnarre/prep/tokens/gpt.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/distilbert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/proof.py": ["/qnarre/base/claim.py", "/qnarre/base/narrative.py", "/qnarre/base/author.py"], "/qnarre/prep/tokens/byt5.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/convert/electra.py": ["/qnarre/prep/config/electra.py", "/qnarre/models/electra.py"], "/qnarre/models/luke.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/recs.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/chain.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/date.py", "/qnarre/base/doc/header.py"], "/tools/triton/python/triton/runtime/autotuner.py": ["/tools/triton/python/triton/testing.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/convert/roberta.py": ["/qnarre/prep/config/bert.py", "/qnarre/models/bert.py"], "/qnarre/core/test/deduce.py": ["/qnarre/core/utils.py", "/qnarre/core/embed.py"], "/qnarre/prep/tokens/fast/gpt_neox.py": ["/qnarre/tokens/fast.py"], "/qnarre/base/doc/nominals.py": ["/qnarre/base/doc/base.py"], "/qnarre/models/ibert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/resource.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/qnarre/core/search.py": ["/qnarre/core/base.py"], "/qnarre/prep/tokens/fast/gpt.py": 
["/qnarre/tokens/fast.py", "/qnarre/prep/tokens/gpt.py"], "/qnarre/base/doc/header.py": ["/qnarre/base/doc/date.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/category.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/base.py"], "/qnarre/models/ctrl.py": ["/qnarre/core/embed.py", "/qnarre/prep/config/ctrl.py"], "/qnarre/prep/convert/byt5.py": ["/qnarre/prep/convert/t5.py", "/qnarre/prep/config/t5.py", "/qnarre/models/t5.py"], "/qnarre/base/doc/mboxes.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/reader.py", "/qnarre/base/doc/record.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/sanitizer.py", "/qnarre/base/doc/counter.py"], "/qnarre/prep/tokens/fast/deberta.py": ["/qnarre/tokens/base.py", "/qnarre/prep/tokens/fast/gpt2.py", "/qnarre/prep/tokens/deberta.py"], "/qnarre/core/test/attend.py": ["/qnarre/core/utils.py"], "/tools/triton/python/triton/compiler/code_generator.py": ["/tools/triton/python/triton/__init__.py", "/tools/triton/python/triton/language/__init__.py", "/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/models/old/bert2.py": ["/qnarre/core/norm.py"], "/qnarre/base/__init__.py": ["/qnarre/base/org.py", "/qnarre/base/proof.py", "/qnarre/base/net.py", "/qnarre/base/doc.py", "/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/named.py", "/qnarre/base/conflict.py", "/qnarre/base/judgment.py", "/qnarre/base/activism.py", "/qnarre/base/conjecture.py"], "/qnarre/models/t5.py": ["/qnarre/prep/config/t5.py"], "/qnarre/core/deduce.py": ["/qnarre/core/base.py", "/qnarre/core/search.py"], "/qnarre/models/old/bert.py": ["/qnarre/core/squad.py"], "/tools/triton/python/triton/compiler/__init__.py": ["/tools/triton/python/triton/compiler/compiler.py", "/tools/triton/python/triton/compiler/errors.py"], "/qnarre/base/doc/command.py": 
["/qnarre/base/doc/qnn.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/args.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/dispatch.py"], "/qnarre/prep/convert/pegasus.py": ["/qnarre/prep/config/pegasus.py"], "/tools/triton/python/triton/ops/matmul.py": ["/tools/triton/python/triton/ops/matmul_perf_model.py"], "/tools/triton/python/triton/debugger/tl_lang.py": ["/tools/triton/python/triton/debugger/core.py", "/tools/triton/python/triton/debugger/memory_map.py"], "/qnarre/prep/tokens/marian.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/xlnet.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlnet.py"], "/qnarre/prep/tokens/deberta2.py": ["/qnarre/tokens/utils.py"], "/qnarre/core/pretrained.py": ["/qnarre/core/base.py"], "/qnarre/base/org.py": ["/qnarre/base/doc.py", "/qnarre/base/net.py", "/qnarre/base/stats.py", "/qnarre/base/named.py"], "/qnarre/models/transfo_xl.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/transfo_xl.py"], "/qnarre/models/roberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/roberta.py"], "/qnarre/base/doc/util/node.py": ["/qnarre/base/doc/util/row.py"], "/qnarre/models/dpr.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/prep/tokens/plbart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/part.py": ["/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py"], "/qnarre/prep/convert/bart.py": ["/qnarre/models/bart.py"], "/qnarre/models/albert.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/albert.py"], "/qnarre/prep/tokens/fast/xlnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py"], "/qnarre/models/bart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/models/segformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/chain.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", 
"/qnarre/base/doc/header.py", "/qnarre/base/doc/counter.py", "/qnarre/base/doc/connect.py", "/qnarre/base/doc/exporter.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/date.py"], "/qnarre/base/conflict.py": ["/qnarre/base/claim.py", "/qnarre/base/author.py", "/qnarre/base/narrative.py", "/qnarre/base/conjecture.py"], "/qnarre/models/electra.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/electra.py"], "/qnarre/base/doc/util/utils.py": ["/qnarre/base/doc/util/item.py", "/qnarre/base/doc/util/row.py", "/qnarre/base/doc/util/node.py"], "/qnarre/base/doc/connect.py": ["/qnarre/base/doc/graph.py", "/qnarre/base/doc/base.py"], "/qnarre/prep/convert/gpt2.py": ["/qnarre/models/gpt2.py"], "/qnarre/base/doc/counter.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/fast/mpnet.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/fast.py", "/qnarre/prep/tokens/mpnet.py"], "/qnarre/base/doc/util/row.py": ["/qnarre/base/doc/util/item.py"], "/qnarre/models/gpt_neox.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py"], "/qnarre/base/claim.py": ["/qnarre/base/named.py"], "/tools/triton/python/triton/runtime/driver.py": ["/tools/triton/python/triton/common/build.py", "/tools/triton/python/triton/runtime/cache.py"], "/qnarre/models/deberta.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/models/xlm.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/xlm.py"], "/qnarre/base/conjecture.py": ["/qnarre/base/claim.py", "/qnarre/base/proof.py", "/qnarre/base/narrative.py"], "/tools/triton/python/triton/testing.py": ["/tools/triton/python/triton/runtime/__init__.py"], "/qnarre/tokens/fast.py": ["/qnarre/tokens/utils.py", "/qnarre/tokens/base.py"], "/qnarre/prep/tokens/bart.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/narrative.py": ["/qnarre/base/named.py"], "/qnarre/prep/convert/transfo_xl.py": 
["/qnarre/prep/config/transfo_xl.py", "/qnarre/models/transfo_xl.py"], "/qnarre/base/doc/message.py": ["/qnarre/base/doc/nominals.py", "/qnarre/base/doc/justifier.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/realm.py", "/qnarre/base/doc/part.py"], "/qnarre/models/mbart.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/mbart.py"], "/qnarre/base/doc/content.py": ["/qnarre/base/doc/log.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/resource.py"], "/qnarre/prep/tokens/canine.py": ["/qnarre/tokens/utils.py"], "/qnarre/models/nystromformer.py": ["/qnarre/core/embed.py", "/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/prep/tokens/pegasus.py": ["/qnarre/tokens/utils.py"], "/qnarre/base/doc/date.py": ["/qnarre/base/__init__.py"], "/tools/triton/python/triton/language/extra/cuda.py": ["/tools/triton/python/triton/language/__init__.py"], "/tools/triton/python/triton/__init__.py": ["/tools/triton/python/triton/runtime/__init__.py", "/tools/triton/python/triton/runtime/jit.py", "/tools/triton/python/triton/compiler/__init__.py", "/tools/triton/python/triton/debugger/debugger.py"], "/qnarre/prep/tokens/transfo_xl.py": ["/qnarre/tokens/utils.py"], "/tools/triton/python/triton/compiler/make_launcher.py": ["/tools/triton/python/triton/common/__init__.py", "/tools/triton/python/triton/runtime/cache.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/prep/tokens/prophetnet.py": ["/qnarre/tokens/utils.py"], "/qnarre/prep/config/roberta.py": ["/qnarre/prep/config/bert.py"], "/qnarre/base/doc/category.py": ["/qnarre/base/doc/junk.py", "/qnarre/base/doc/log.py", "/qnarre/base/doc/resource.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/part.py"], "/tools/triton/python/triton/runtime/__init__.py": ["/tools/triton/python/triton/runtime/autotuner.py", "/tools/triton/python/triton/runtime/driver.py", "/tools/triton/python/triton/runtime/jit.py"], "/qnarre/models/bert.py": ["/qnarre/core/embed.py", 
"/qnarre/core/mlp.py", "/qnarre/prep/config/bert.py"], "/qnarre/base/doc/realm.py": ["/qnarre/base/doc/exporter.py", "/qnarre/base/doc/nominals.py", "/qnarre/base/doc/base.py", "/qnarre/base/doc/meta.py", "/qnarre/base/doc/part.py"], "/qnarre/base/net.py": ["/qnarre/base/author.py", "/qnarre/base/claim.py", "/qnarre/base/named.py"], "/tools/triton/python/triton/language/random.py": ["/tools/triton/python/triton/language/__init__.py"]}